2024-11-20 04:27:46,661 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-20 04:27:46,672 main DEBUG Took 0.009278 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-20 04:27:46,673 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-20 04:27:46,673 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-20 04:27:46,674 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-20 04:27:46,675 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 04:27:46,681 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-20 04:27:46,692 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 04:27:46,694 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 04:27:46,694 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 04:27:46,695 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 04:27:46,695 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 04:27:46,695 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 04:27:46,696 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 04:27:46,696 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 04:27:46,697 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 04:27:46,697 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 04:27:46,698 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 04:27:46,698 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 04:27:46,698 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 04:27:46,699 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-20 04:27:46,699 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 04:27:46,699 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 04:27:46,700 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 04:27:46,700 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 04:27:46,701 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 04:27:46,701 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 04:27:46,701 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 04:27:46,701 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 04:27:46,702 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 04:27:46,702 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 04:27:46,703 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 04:27:46,703 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-20 04:27:46,704 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 04:27:46,705 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-20 04:27:46,707 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-20 04:27:46,707 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-20 04:27:46,708 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-20 04:27:46,709 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-20 04:27:46,717 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-20 04:27:46,719 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-20 04:27:46,721 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-20 04:27:46,721 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-20 04:27:46,722 main DEBUG createAppenders(={Console}) 2024-11-20 04:27:46,722 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-20 04:27:46,722 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-20 04:27:46,723 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-20 04:27:46,723 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-20 04:27:46,723 main DEBUG OutputStream closed 2024-11-20 04:27:46,724 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-20 04:27:46,724 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-20 04:27:46,724 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-20 04:27:46,799 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-20 04:27:46,802 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-20 04:27:46,803 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-20 04:27:46,804 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-20 04:27:46,805 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-20 04:27:46,805 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-20 04:27:46,805 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-20 04:27:46,805 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-20 04:27:46,806 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-20 04:27:46,806 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-20 04:27:46,806 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-20 04:27:46,807 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-20 04:27:46,807 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-20 04:27:46,807 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-20 04:27:46,808 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-20 04:27:46,808 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-20 04:27:46,808 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-20 04:27:46,809 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-20 04:27:46,811 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-20 04:27:46,812 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-20 04:27:46,812 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-20 04:27:46,813 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-20T04:27:47,058 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b 2024-11-20 04:27:47,061 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-20 04:27:47,061 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
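The DEBUG lines above are Log4j Core 2.17.2 assembling the test logging configuration from the log4j2.properties bundled in the hbase-logging tests jar: per-package logger levels (e.g. org.apache.hadoop=WARN, org.apache.hadoop.hbase=DEBUG, org.apache.zookeeper=ERROR), a PatternLayout with the pattern shown, and an HBaseTestAppender named "Console" writing to SYSTEM_ERR, all hung off a root logger at INFO. Purely as an illustration, the sketch below reproduces a few of those logged settings with Log4j 2's programmatic ConfigurationBuilder API; it substitutes the stock Console appender for the HBase-specific HBaseTestAppender and is not the actual configuration file.

```java
// Illustrative sketch only: rebuilds a few of the settings visible in the DEBUG
// output above (root=INFO,Console; the logged pattern; some per-package levels)
// with Log4j 2's programmatic API. The real test config is the log4j2.properties
// inside the hbase-logging tests jar, and it uses HBaseTestAppender rather than
// the stock Console appender used here.
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.appender.ConsoleAppender;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public class TestLoggingConfigSketch {
  public static void configure() {
    ConfigurationBuilder<BuiltConfiguration> builder =
        ConfigurationBuilderFactory.newConfigurationBuilder();

    // Appender on stderr with the pattern from the PatternLayout$Builder line above.
    AppenderComponentBuilder console = builder.newAppender("Console", "Console")
        .addAttribute("target", ConsoleAppender.Target.SYSTEM_ERR);
    console.add(builder.newLayout("PatternLayout")
        .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
    builder.add(console);

    // A few of the per-package levels shown in the LoggerConfig$Builder lines.
    builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
    builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
    builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));

    // Root logger: INFO, routed to the Console appender (levelAndRefs="INFO,Console").
    builder.add(builder.newRootLogger(Level.INFO).add(builder.newAppenderRef("Console")));

    Configurator.initialize(builder.build());
  }
}
```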
2024-11-20T04:27:47,070 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-20T04:27:47,101 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=208, ProcessCount=11, AvailableMemoryMB=7583 2024-11-20T04:27:47,103 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T04:27:47,120 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/cluster_1f7e643f-dbdb-cf30-2517-66e95294bbae, deleteOnExit=true 2024-11-20T04:27:47,120 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T04:27:47,121 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/test.cache.data in system properties and HBase conf 2024-11-20T04:27:47,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T04:27:47,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/hadoop.log.dir in system properties and HBase conf 2024-11-20T04:27:47,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T04:27:47,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T04:27:47,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T04:27:47,217 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-20T04:27:47,302 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T04:27:47,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T04:27:47,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T04:27:47,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T04:27:47,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T04:27:47,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T04:27:47,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T04:27:47,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T04:27:47,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T04:27:47,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T04:27:47,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/nfs.dump.dir in system properties and HBase conf 2024-11-20T04:27:47,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/java.io.tmpdir in system properties and HBase conf 2024-11-20T04:27:47,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T04:27:47,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T04:27:47,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T04:27:47,806 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T04:27:48,185 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-20T04:27:48,260 INFO [Time-limited test {}] log.Log(170): Logging initialized @2337ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-20T04:27:48,333 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:27:48,392 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:27:48,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:27:48,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:27:48,412 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:27:48,424 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:27:48,427 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:27:48,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:27:48,628 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/java.io.tmpdir/jetty-localhost-33047-hadoop-hdfs-3_4_1-tests_jar-_-any-17868510603301011295/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T04:27:48,636 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:33047} 2024-11-20T04:27:48,637 INFO [Time-limited test {}] server.Server(415): Started @2715ms 2024-11-20T04:27:48,662 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T04:27:49,037 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:27:49,044 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:27:49,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:27:49,046 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:27:49,046 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:27:49,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:27:49,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:27:49,168 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/java.io.tmpdir/jetty-localhost-40723-hadoop-hdfs-3_4_1-tests_jar-_-any-6851117655698353149/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:27:49,169 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:40723} 2024-11-20T04:27:49,169 INFO [Time-limited test {}] server.Server(415): Started @3248ms 2024-11-20T04:27:49,225 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T04:27:49,349 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:27:49,355 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:27:49,357 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:27:49,357 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:27:49,357 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T04:27:49,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:27:49,360 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:27:49,482 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/java.io.tmpdir/jetty-localhost-44921-hadoop-hdfs-3_4_1-tests_jar-_-any-6881548637867616709/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:27:49,483 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:44921} 2024-11-20T04:27:49,483 INFO [Time-limited test {}] server.Server(415): Started @3561ms 2024-11-20T04:27:49,486 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
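The INFO lines from HBaseTestingUtil and the Jetty servers above are the standard minicluster bring-up for regionserver.wal.TestLogRolling: one master, one region server, two datanodes and one ZooKeeper server, per the logged StartMiniClusterOption. A minimal sketch of a test class that would drive this sequence is shown below; it assumes the HBaseTestingUtil, StartMiniClusterOption and HBaseClassTestRule APIs named in the log and is not the actual TestLogRolling source.

```java
// Hypothetical sketch (not the real TestLogRolling): a test class whose setup
// produces a startup sequence like the one logged above, assuming the
// HBaseTestingUtil / StartMiniClusterOption / HBaseClassTestRule APIs the log names.
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;

public class MiniClusterStartupSketch {

  // Enforces the per-class timeout reported as "timeout: 13 mins" in the log.
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(MiniClusterStartupSketch.class);

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Matches StartMiniClusterOption{numMasters=1, numRegionServers=1,
    // numDataNodes=2, numZkServers=1} from the log above.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    // Brings up ZooKeeper, the HDFS minicluster (namenode + datanodes) and HBase.
    TEST_UTIL.startMiniCluster(option);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }
}
```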
2024-11-20T04:27:49,661 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/cluster_1f7e643f-dbdb-cf30-2517-66e95294bbae/data/data3/current/BP-1006445382-172.17.0.2-1732076867909/current, will proceed with Du for space computation calculation, 2024-11-20T04:27:49,661 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/cluster_1f7e643f-dbdb-cf30-2517-66e95294bbae/data/data1/current/BP-1006445382-172.17.0.2-1732076867909/current, will proceed with Du for space computation calculation, 2024-11-20T04:27:49,661 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/cluster_1f7e643f-dbdb-cf30-2517-66e95294bbae/data/data2/current/BP-1006445382-172.17.0.2-1732076867909/current, will proceed with Du for space computation calculation, 2024-11-20T04:27:49,661 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/cluster_1f7e643f-dbdb-cf30-2517-66e95294bbae/data/data4/current/BP-1006445382-172.17.0.2-1732076867909/current, will proceed with Du for space computation calculation, 2024-11-20T04:27:49,751 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T04:27:49,764 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T04:27:49,866 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x32d7e999c33f417d with lease ID 0xdca45b58ffdbc07e: Processing first storage report for DS-5728b753-b1f4-44b5-879e-b8c868801182 from datanode DatanodeRegistration(127.0.0.1:44703, datanodeUuid=ddd74eb2-988d-44d4-9d97-d38c0552df74, infoPort=34091, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=682788265;c=1732076867909) 2024-11-20T04:27:49,868 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x32d7e999c33f417d with lease ID 0xdca45b58ffdbc07e: from storage DS-5728b753-b1f4-44b5-879e-b8c868801182 node DatanodeRegistration(127.0.0.1:44703, datanodeUuid=ddd74eb2-988d-44d4-9d97-d38c0552df74, infoPort=34091, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=682788265;c=1732076867909), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-20T04:27:49,869 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x544b69dffb316800 with lease ID 0xdca45b58ffdbc07f: Processing first storage report for DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f from datanode DatanodeRegistration(127.0.0.1:34821, datanodeUuid=0ba43b70-ff8f-46fb-8a05-5888746f784f, infoPort=37107, infoSecurePort=0, ipcPort=45571, storageInfo=lv=-57;cid=testClusterID;nsid=682788265;c=1732076867909) 2024-11-20T04:27:49,869 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x544b69dffb316800 with lease ID 0xdca45b58ffdbc07f: from storage DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f node DatanodeRegistration(127.0.0.1:34821, datanodeUuid=0ba43b70-ff8f-46fb-8a05-5888746f784f, infoPort=37107, infoSecurePort=0, ipcPort=45571, storageInfo=lv=-57;cid=testClusterID;nsid=682788265;c=1732076867909), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T04:27:49,869 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x32d7e999c33f417d with lease ID 0xdca45b58ffdbc07e: Processing first storage report for DS-bfbb158a-851f-41ef-a8cc-78260952ce15 from datanode DatanodeRegistration(127.0.0.1:44703, datanodeUuid=ddd74eb2-988d-44d4-9d97-d38c0552df74, infoPort=34091, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=682788265;c=1732076867909) 2024-11-20T04:27:49,870 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x32d7e999c33f417d with lease ID 0xdca45b58ffdbc07e: from storage DS-bfbb158a-851f-41ef-a8cc-78260952ce15 node DatanodeRegistration(127.0.0.1:44703, datanodeUuid=ddd74eb2-988d-44d4-9d97-d38c0552df74, infoPort=34091, infoSecurePort=0, ipcPort=41757, storageInfo=lv=-57;cid=testClusterID;nsid=682788265;c=1732076867909), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:27:49,870 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x544b69dffb316800 with lease ID 0xdca45b58ffdbc07f: Processing first storage report for DS-b32decd3-7297-4277-a6a5-5b47ab0390aa from datanode DatanodeRegistration(127.0.0.1:34821, datanodeUuid=0ba43b70-ff8f-46fb-8a05-5888746f784f, infoPort=37107, infoSecurePort=0, ipcPort=45571, storageInfo=lv=-57;cid=testClusterID;nsid=682788265;c=1732076867909) 2024-11-20T04:27:49,870 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x544b69dffb316800 with lease ID 0xdca45b58ffdbc07f: from storage DS-b32decd3-7297-4277-a6a5-5b47ab0390aa node DatanodeRegistration(127.0.0.1:34821, datanodeUuid=0ba43b70-ff8f-46fb-8a05-5888746f784f, infoPort=37107, infoSecurePort=0, ipcPort=45571, storageInfo=lv=-57;cid=testClusterID;nsid=682788265;c=1732076867909), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:27:49,922 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b 2024-11-20T04:27:49,995 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/cluster_1f7e643f-dbdb-cf30-2517-66e95294bbae/zookeeper_0, clientPort=51126, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/cluster_1f7e643f-dbdb-cf30-2517-66e95294bbae/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/cluster_1f7e643f-dbdb-cf30-2517-66e95294bbae/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T04:27:50,005 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51126 2024-11-20T04:27:50,015 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:27:50,018 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:27:50,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:27:50,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:27:50,666 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c with version=8 2024-11-20T04:27:50,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/hbase-staging 2024-11-20T04:27:50,756 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-20T04:27:51,003 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:27:51,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:27:51,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:27:51,020 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:27:51,020 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:27:51,020 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:27:51,168 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T04:27:51,227 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-20T04:27:51,235 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-20T04:27:51,239 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:27:51,265 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 20160 (auto-detected) 2024-11-20T04:27:51,267 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-20T04:27:51,285 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43451 2024-11-20T04:27:51,306 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43451 connecting to ZooKeeper ensemble=127.0.0.1:51126 2024-11-20T04:27:51,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:434510x0, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:27:51,339 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43451-0x10133a183e90000 connected 2024-11-20T04:27:51,367 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:27:51,370 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:27:51,379 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:27:51,383 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c, hbase.cluster.distributed=false 2024-11-20T04:27:51,406 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:27:51,411 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43451 2024-11-20T04:27:51,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43451 2024-11-20T04:27:51,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43451 2024-11-20T04:27:51,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43451 2024-11-20T04:27:51,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43451 2024-11-20T04:27:51,527 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:27:51,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:27:51,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:27:51,529 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:27:51,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:27:51,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:27:51,532 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T04:27:51,534 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:27:51,535 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35243 2024-11-20T04:27:51,537 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35243 connecting to ZooKeeper ensemble=127.0.0.1:51126 2024-11-20T04:27:51,538 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:27:51,542 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:27:51,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:352430x0, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:27:51,550 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35243-0x10133a183e90001 connected 2024-11-20T04:27:51,550 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:27:51,554 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T04:27:51,562 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T04:27:51,564 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T04:27:51,568 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:27:51,569 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35243 2024-11-20T04:27:51,570 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35243 2024-11-20T04:27:51,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35243 2024-11-20T04:27:51,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35243 2024-11-20T04:27:51,573 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35243 2024-11-20T04:27:51,589 DEBUG [M:0;c2a32e16c274:43451 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c2a32e16c274:43451 2024-11-20T04:27:51,590 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c2a32e16c274,43451,1732076870808 2024-11-20T04:27:51,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:27:51,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:27:51,600 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c2a32e16c274,43451,1732076870808 2024-11-20T04:27:51,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T04:27:51,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:27:51,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-20T04:27:51,622 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T04:27:51,623 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c2a32e16c274,43451,1732076870808 from backup master directory 2024-11-20T04:27:51,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c2a32e16c274,43451,1732076870808 2024-11-20T04:27:51,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:27:51,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:27:51,627 WARN [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T04:27:51,627 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c2a32e16c274,43451,1732076870808 2024-11-20T04:27:51,629 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-20T04:27:51,631 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-20T04:27:51,687 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/hbase.id] with ID: 07532208-c17a-4cbd-98c8-f5b0f9af7814 2024-11-20T04:27:51,688 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/.tmp/hbase.id 2024-11-20T04:27:51,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741826_1002 (size=42) 2024-11-20T04:27:51,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741826_1002 (size=42) 2024-11-20T04:27:51,701 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/.tmp/hbase.id]:[hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/hbase.id] 2024-11-20T04:27:51,742 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:27:51,747 INFO 
[master/c2a32e16c274:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T04:27:51,765 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-20T04:27:51,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:27:51,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:27:51,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:27:51,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:27:51,802 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T04:27:51,804 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T04:27:51,810 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:27:51,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741828_1004 (size=1189) 2024-11-20T04:27:51,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741828_1004 (size=1189) 2024-11-20T04:27:51,869 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store 2024-11-20T04:27:51,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:27:51,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:27:51,897 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-20T04:27:51,901 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:27:51,902 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T04:27:51,903 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:27:51,903 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:27:51,905 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T04:27:51,905 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
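The table descriptor printed twice above (once by MasterRegion, once by HRegion while creating region 1595e783b53d99cd5eef43b6debb2682) defines the local 'master:store' region with four column families, of which only 'info' deviates from the defaults (3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks). Purely as a reading aid, the sketch below expresses an equivalent descriptor with HBase's public TableDescriptorBuilder API; MasterRegion builds its descriptor internally, so this is illustrative rather than the code that produced the log.

```java
// Illustrative only: an equivalent of the 'master:store' descriptor printed above,
// expressed with the public TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API.
// Values are copied from the logged descriptor; 'proc', 'rs' and 'state' use defaults
// (1 version, ROW bloom, 64 KB blocks), which match what the log shows.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  static TableDescriptor masterStoreDescriptor() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        // 'info': 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}
```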
2024-11-20T04:27:51,905 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:27:51,906 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732076871902Disabling compacts and flushes for region at 1732076871902Disabling writes for close at 1732076871905 (+3 ms)Writing region close event to WAL at 1732076871905Closed at 1732076871905 2024-11-20T04:27:51,908 WARN [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/.initializing 2024-11-20T04:27:51,909 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/WALs/c2a32e16c274,43451,1732076870808 2024-11-20T04:27:51,934 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C43451%2C1732076870808, suffix=, logDir=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/WALs/c2a32e16c274,43451,1732076870808, archiveDir=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/oldWALs, maxLogs=10 2024-11-20T04:27:51,944 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C43451%2C1732076870808.1732076871939 2024-11-20T04:27:51,965 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/WALs/c2a32e16c274,43451,1732076870808/c2a32e16c274%2C43451%2C1732076870808.1732076871939 2024-11-20T04:27:51,975 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37107:37107),(127.0.0.1/127.0.0.1:34091:34091)] 2024-11-20T04:27:51,977 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:27:51,977 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:27:51,981 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:27:51,983 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:27:52,023 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:27:52,053 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T04:27:52,057 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:52,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:27:52,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:27:52,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T04:27:52,064 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:52,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:27:52,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:27:52,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T04:27:52,069 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:52,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:27:52,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:27:52,073 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T04:27:52,073 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:52,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:27:52,075 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:27:52,078 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:27:52,079 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:27:52,084 DEBUG [master/c2a32e16c274:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:27:52,085 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:27:52,088 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T04:27:52,091 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:27:52,096 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:27:52,098 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748875, jitterRate=-0.047757357358932495}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T04:27:52,105 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732076871997Initializing all the Stores at 1732076871999 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076872000 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076872001 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076872001Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076872001Cleaning up temporary data from old regions at 1732076872085 (+84 ms)Region opened successfully at 1732076872105 (+20 ms) 2024-11-20T04:27:52,107 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T04:27:52,142 DEBUG 
[master/c2a32e16c274:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22eaba40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:27:52,175 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T04:27:52,186 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T04:27:52,187 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T04:27:52,190 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T04:27:52,191 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-20T04:27:52,196 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-20T04:27:52,196 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T04:27:52,227 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T04:27:52,237 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T04:27:52,239 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T04:27:52,242 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T04:27:52,244 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T04:27:52,246 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T04:27:52,249 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T04:27:52,252 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T04:27:52,253 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T04:27:52,255 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T04:27:52,256 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T04:27:52,272 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T04:27:52,275 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T04:27:52,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T04:27:52,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T04:27:52,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:27:52,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:27:52,282 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c2a32e16c274,43451,1732076870808, sessionid=0x10133a183e90000, setting cluster-up flag (Was=false) 2024-11-20T04:27:52,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:27:52,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:27:52,302 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T04:27:52,304 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2a32e16c274,43451,1732076870808 2024-11-20T04:27:52,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:27:52,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:27:52,316 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T04:27:52,318 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2a32e16c274,43451,1732076870808 2024-11-20T04:27:52,325 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T04:27:52,377 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(746): ClusterId : 07532208-c17a-4cbd-98c8-f5b0f9af7814 2024-11-20T04:27:52,380 DEBUG [RS:0;c2a32e16c274:35243 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T04:27:52,387 DEBUG [RS:0;c2a32e16c274:35243 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T04:27:52,387 DEBUG [RS:0;c2a32e16c274:35243 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T04:27:52,390 DEBUG [RS:0;c2a32e16c274:35243 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T04:27:52,390 DEBUG [RS:0;c2a32e16c274:35243 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fd80656, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:27:52,404 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T04:27:52,406 DEBUG [RS:0;c2a32e16c274:35243 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c2a32e16c274:35243 2024-11-20T04:27:52,409 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T04:27:52,409 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T04:27:52,409 DEBUG [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(832): About to register with Master. 
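The repeated "Unable to get data of znode ... because node does not exist (not necessarily an error)" lines above are plain existence probes against the quorum at 127.0.0.1:51126. A minimal, illustrative equivalent with the stock ZooKeeper client (not HBase's internal ZKUtil); the 30-second session timeout is an assumption.

import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public final class ZnodeProbeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum string and znode path are taken from the log above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51126", 30_000, event -> { });
    try {
      Stat stat = zk.exists("/hbase/balancer", false);
      // A null Stat simply means the node is absent, matching the
      // "not necessarily an error" wording in the log.
      System.out.println(stat == null ? "absent" : "present, version=" + stat.getVersion());
    } finally {
      zk.close();
    }
  }
}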
2024-11-20T04:27:52,412 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(2659): reportForDuty to master=c2a32e16c274,43451,1732076870808 with port=35243, startcode=1732076871487 2024-11-20T04:27:52,416 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T04:27:52,423 DEBUG [RS:0;c2a32e16c274:35243 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T04:27:52,425 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T04:27:52,432 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c2a32e16c274,43451,1732076870808 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T04:27:52,441 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:27:52,442 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:27:52,442 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:27:52,442 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:27:52,442 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c2a32e16c274:0, corePoolSize=10, maxPoolSize=10 2024-11-20T04:27:52,442 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:27:52,443 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:27:52,443 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:27:52,448 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:27:52,448 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta 
region 2024-11-20T04:27:52,453 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732076902453 2024-11-20T04:27:52,454 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:52,454 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T04:27:52,455 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T04:27:52,456 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T04:27:52,461 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T04:27:52,461 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T04:27:52,461 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T04:27:52,462 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T04:27:52,462 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
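The cleaner classes initialized above are pluggable: the master reads a comma-separated list of cleaner delegates from configuration and instantiates each one for the chore. A hedged sketch of supplying such a list programmatically; the property name hbase.master.logcleaner.plugins is believed to be the standard key for the WAL cleaner chain but is stated here as an assumption, while the class names simply echo two of the cleaners listed in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class LogCleanerPluginsSketch {
  public static Configuration withCleaners() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property name; class names are taken verbatim from the log above.
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
      + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
    return conf;
  }
}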
2024-11-20T04:27:52,477 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T04:27:52,478 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T04:27:52,479 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T04:27:52,488 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T04:27:52,488 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T04:27:52,490 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732076872490,5,FailOnTimeoutGroup] 2024-11-20T04:27:52,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:27:52,491 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732076872491,5,FailOnTimeoutGroup] 2024-11-20T04:27:52,491 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T04:27:52,492 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T04:27:52,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:27:52,494 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T04:27:52,494 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
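The "Reopening regions with very high storeFileRefCount is disabled" message above spells out its own switch: the feature stays off until hbase.regions.recovery.store.file.ref.count is given a positive threshold. A minimal sketch of flipping it on; the value 256 is an arbitrary example, not a recommendation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class StoreFileRefCountSketch {
  public static Configuration withRecoveryEnabled() {
    Configuration conf = HBaseConfiguration.create();
    // Property name is quoted directly from the log; 256 is only an illustrative threshold.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
    return conf;
  }
}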
2024-11-20T04:27:52,495 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c 2024-11-20T04:27:52,495 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
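The region created above (encoded name 1588230740) backs the hbase:meta catalog table. Once the cluster is serving, that table can be read like any other through the client API; a minimal sketch, assuming a Configuration that points at this cluster (e.g. its hbase-site.xml on the classpath).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class MetaScanSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(new Scan())) {
      for (Result row : scanner) {
        // Row keys in hbase:meta encode the table name, start key, and region id.
        System.out.println(Bytes.toStringBinary(row.getRow()));
      }
    }
  }
}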
2024-11-20T04:27:52,497 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40223, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T04:27:52,505 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43451 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c2a32e16c274,35243,1732076871487 2024-11-20T04:27:52,507 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43451 {}] master.ServerManager(517): Registering regionserver=c2a32e16c274,35243,1732076871487 2024-11-20T04:27:52,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741832_1008 (size=32) 2024-11-20T04:27:52,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741832_1008 (size=32) 2024-11-20T04:27:52,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:27:52,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T04:27:52,521 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T04:27:52,521 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:52,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:27:52,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T04:27:52,524 DEBUG [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c 2024-11-20T04:27:52,524 DEBUG [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35115 2024-11-20T04:27:52,524 DEBUG [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.master.info.port=-1 2024-11-20T04:27:52,524 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T04:27:52,525 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:52,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:27:52,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T04:27:52,528 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T04:27:52,528 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:52,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:27:52,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:27:52,530 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T04:27:52,530 DEBUG [RS:0;c2a32e16c274:35243 {}] zookeeper.ZKUtil(111): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, 
baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c2a32e16c274,35243,1732076871487 2024-11-20T04:27:52,530 WARN [RS:0;c2a32e16c274:35243 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T04:27:52,530 INFO [RS:0;c2a32e16c274:35243 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:27:52,531 DEBUG [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487 2024-11-20T04:27:52,532 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T04:27:52,533 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:52,533 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c2a32e16c274,35243,1732076871487] 2024-11-20T04:27:52,545 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:27:52,545 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T04:27:52,547 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740 2024-11-20T04:27:52,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740 2024-11-20T04:27:52,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T04:27:52,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T04:27:52,553 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
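The "(16.0 M)" above is not configured anywhere; as the message says, it is the region's memstore flush heap size divided by the number of column families (hbase:meta has four: info, ns, rep_barrier, table). The same fallback produced the 32.0 M figure for master:store earlier: its logged flushSize of 134217728 bytes over four families gives 33554432, exactly the flushSizeLowerBound recorded when that region opened. A tiny check of the arithmetic:

public final class FlushLowerBoundCheck {
  public static void main(String[] args) {
    // master:store: logged flushSize=134217728 bytes spread over 4 families (info, proc, rs, state).
    System.out.println(134217728L / 4);  // 33554432 -> matches flushSizeLowerBound=33554432
    // hbase:meta: the logged flushSizeLowerBound=16777216 over 4 families implies a 64 MB
    // memstore flush heap size for that region (an inference from the log, not a quoted config value).
    System.out.println(16777216L * 4);   // 67108864 -> 64 MB
  }
}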
2024-11-20T04:27:52,556 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T04:27:52,560 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T04:27:52,561 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:27:52,562 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=793641, jitterRate=0.009167388081550598}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T04:27:52,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732076872515Initializing all the Stores at 1732076872517 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076872517Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076872517Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076872517Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076872517Cleaning up temporary data from old regions at 1732076872552 (+35 ms)Region opened successfully at 1732076872565 (+13 ms) 2024-11-20T04:27:52,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T04:27:52,566 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T04:27:52,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T04:27:52,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T04:27:52,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T04:27:52,568 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T04:27:52,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 
1588230740: Waiting for close lock at 1732076872566Disabling compacts and flushes for region at 1732076872566Disabling writes for close at 1732076872566Writing region close event to WAL at 1732076872567 (+1 ms)Closed at 1732076872568 (+1 ms) 2024-11-20T04:27:52,572 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:27:52,572 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T04:27:52,578 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T04:27:52,580 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T04:27:52,584 INFO [RS:0;c2a32e16c274:35243 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T04:27:52,584 INFO [RS:0;c2a32e16c274:35243 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:27:52,588 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T04:27:52,590 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T04:27:52,593 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T04:27:52,595 INFO [RS:0;c2a32e16c274:35243 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T04:27:52,597 INFO [RS:0;c2a32e16c274:35243 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
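The MemStoreFlusher low-water mark above sits at exactly 95% of the global limit (880 M x 0.95 = 836 M), which is consistent with the default relationship between the regionserver's global memstore size and its lower limit. The usual keys (believed to be hbase.regionserver.global.memstore.size and hbase.regionserver.global.memstore.size.lower.limit) are assumptions here, not values quoted in the log; only the arithmetic below is taken from the logged numbers.

public final class MemStoreLimitCheck {
  public static void main(String[] args) {
    long globalLimitMb = 880;          // globalMemStoreLimit from the log
    double lowerLimitFraction = 0.95;  // assumed default lower-limit fraction
    System.out.println(Math.round(globalLimitMb * lowerLimitFraction)); // 836 -> matches globalMemStoreLimitLowMark
  }
}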
2024-11-20T04:27:52,597 DEBUG [RS:0;c2a32e16c274:35243 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:27:52,597 DEBUG [RS:0;c2a32e16c274:35243 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:27:52,598 DEBUG [RS:0;c2a32e16c274:35243 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:27:52,598 DEBUG [RS:0;c2a32e16c274:35243 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:27:52,598 DEBUG [RS:0;c2a32e16c274:35243 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:27:52,598 DEBUG [RS:0;c2a32e16c274:35243 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:27:52,598 DEBUG [RS:0;c2a32e16c274:35243 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:27:52,599 DEBUG [RS:0;c2a32e16c274:35243 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:27:52,599 DEBUG [RS:0;c2a32e16c274:35243 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:27:52,599 DEBUG [RS:0;c2a32e16c274:35243 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:27:52,599 DEBUG [RS:0;c2a32e16c274:35243 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:27:52,600 DEBUG [RS:0;c2a32e16c274:35243 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:27:52,600 DEBUG [RS:0;c2a32e16c274:35243 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:27:52,600 DEBUG [RS:0;c2a32e16c274:35243 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:27:52,601 INFO [RS:0;c2a32e16c274:35243 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T04:27:52,601 INFO [RS:0;c2a32e16c274:35243 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T04:27:52,601 INFO [RS:0;c2a32e16c274:35243 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:27:52,601 INFO [RS:0;c2a32e16c274:35243 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
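Each "Starting executor service" line above describes a fixed-size worker pool (core and max pool sizes are equal). In plain JDK terms, setting aside HBase's own ExecutorService wrapper, such a pool is just a ThreadPoolExecutor with equal core/max sizes; a sketch using the RS_LOG_REPLAY_OPS sizes from the log, with the 60-second keep-alive as an assumption.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public final class WorkerPoolSketch {
  public static ThreadPoolExecutor logReplayPool() {
    // corePoolSize=2, maxPoolSize=2 as logged for RS_LOG_REPLAY_OPS.
    ThreadPoolExecutor pool =
        new ThreadPoolExecutor(2, 2, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    pool.allowCoreThreadTimeOut(true); // mirrors the allowCoreThreadTimeOut=true noted elsewhere in the log
    return pool;
  }
}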
2024-11-20T04:27:52,601 INFO [RS:0;c2a32e16c274:35243 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T04:27:52,602 INFO [RS:0;c2a32e16c274:35243 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,35243,1732076871487-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T04:27:52,622 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T04:27:52,624 INFO [RS:0;c2a32e16c274:35243 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,35243,1732076871487-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:27:52,624 INFO [RS:0;c2a32e16c274:35243 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:27:52,624 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.Replication(171): c2a32e16c274,35243,1732076871487 started 2024-11-20T04:27:52,642 INFO [RS:0;c2a32e16c274:35243 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:27:52,642 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(1482): Serving as c2a32e16c274,35243,1732076871487, RpcServer on c2a32e16c274/172.17.0.2:35243, sessionid=0x10133a183e90001 2024-11-20T04:27:52,643 DEBUG [RS:0;c2a32e16c274:35243 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T04:27:52,643 DEBUG [RS:0;c2a32e16c274:35243 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c2a32e16c274,35243,1732076871487 2024-11-20T04:27:52,644 DEBUG [RS:0;c2a32e16c274:35243 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,35243,1732076871487' 2024-11-20T04:27:52,644 DEBUG [RS:0;c2a32e16c274:35243 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T04:27:52,645 DEBUG [RS:0;c2a32e16c274:35243 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T04:27:52,646 DEBUG [RS:0;c2a32e16c274:35243 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T04:27:52,646 DEBUG [RS:0;c2a32e16c274:35243 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T04:27:52,646 DEBUG [RS:0;c2a32e16c274:35243 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c2a32e16c274,35243,1732076871487 2024-11-20T04:27:52,646 DEBUG [RS:0;c2a32e16c274:35243 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,35243,1732076871487' 2024-11-20T04:27:52,646 DEBUG [RS:0;c2a32e16c274:35243 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T04:27:52,647 DEBUG [RS:0;c2a32e16c274:35243 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T04:27:52,647 DEBUG [RS:0;c2a32e16c274:35243 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T04:27:52,647 INFO [RS:0;c2a32e16c274:35243 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T04:27:52,647 INFO [RS:0;c2a32e16c274:35243 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-20T04:27:52,744 WARN [c2a32e16c274:43451 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-20T04:27:52,756 INFO [RS:0;c2a32e16c274:35243 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C35243%2C1732076871487, suffix=, logDir=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487, archiveDir=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/oldWALs, maxLogs=32 2024-11-20T04:27:52,759 INFO [RS:0;c2a32e16c274:35243 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C35243%2C1732076871487.1732076872758 2024-11-20T04:27:52,768 INFO [RS:0;c2a32e16c274:35243 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076872758 2024-11-20T04:27:52,771 DEBUG [RS:0;c2a32e16c274:35243 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34091:34091),(127.0.0.1/127.0.0.1:37107:37107)] 2024-11-20T04:27:52,996 DEBUG [c2a32e16c274:43451 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T04:27:53,009 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c2a32e16c274,35243,1732076871487 2024-11-20T04:27:53,016 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2a32e16c274,35243,1732076871487, state=OPENING 2024-11-20T04:27:53,021 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T04:27:53,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:27:53,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:27:53,026 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:27:53,026 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:27:53,027 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T04:27:53,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c2a32e16c274,35243,1732076871487}] 2024-11-20T04:27:53,209 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T04:27:53,212 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44517, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T04:27:53,224 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T04:27:53,225 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:27:53,229 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C35243%2C1732076871487.meta, suffix=.meta, logDir=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487, archiveDir=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/oldWALs, maxLogs=32 2024-11-20T04:27:53,231 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C35243%2C1732076871487.meta.1732076873231.meta 2024-11-20T04:27:53,240 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.meta.1732076873231.meta 2024-11-20T04:27:53,241 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34091:34091),(127.0.0.1/127.0.0.1:37107:37107)] 2024-11-20T04:27:53,242 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:27:53,244 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T04:27:53,247 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T04:27:53,252 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
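The WAL configuration reported above ("blocksize=256 MB, rollsize=128 MB") is consistent with a common sizing pattern: a writer block size of twice the 128 MB HDFS default, and a roll size of half that block size. A minimal arithmetic sketch of that relationship follows; the 2x block-size factor and the 0.5 roll multiplier are assumptions about typical defaults, not values read from this log.

    public class WalRollSizeSketch {
        public static void main(String[] args) {
            // Illustrative arithmetic only; this is not HBase code.
            long hdfsDefaultBlockSize = 128L * 1024 * 1024;          // assumed HDFS default block size
            long walBlockSize = 2 * hdfsDefaultBlockSize;            // 268435456 bytes = 256 MB ("blocksize" above)
            double rollMultiplier = 0.5;                             // assumed roll multiplier
            long rollSize = (long) (walBlockSize * rollMultiplier);  // 134217728 bytes = 128 MB ("rollsize" above)
            System.out.println(walBlockSize + " / " + rollSize);
        }
    }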
2024-11-20T04:27:53,256 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T04:27:53,257 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:27:53,257 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T04:27:53,257 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T04:27:53,260 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T04:27:53,262 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T04:27:53,262 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:53,263 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:27:53,263 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T04:27:53,265 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T04:27:53,266 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:53,266 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:27:53,267 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T04:27:53,268 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T04:27:53,268 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:53,269 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:27:53,269 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T04:27:53,270 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T04:27:53,270 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:53,271 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
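The CompactionConfiguration lines above (ratio 1.2, minFilesToCompact 3, maxFilesToCompact 10) parameterize size-ratio compaction selection, in which a store file is only grouped with other candidates whose combined size is large enough relative to it. A small self-contained illustration of that size-ratio test, using the 1.2 ratio from the log; this is a sketch of the rule, not HBase's selection code.

    import java.util.List;

    public class RatioRuleSketch {
        // A file "fits" the ratio rule when its size is at most
        // ratio times the combined size of the other candidate files.
        static boolean fitsRatio(long fileSize, List<Long> otherSizes, double ratio) {
            long sumOthers = otherSizes.stream().mapToLong(Long::longValue).sum();
            return fileSize <= ratio * sumOthers;
        }

        public static void main(String[] args) {
            // Hypothetical store-file sizes in bytes.
            System.out.println(fitsRatio(100, List.of(60L, 50L), 1.2)); // true:  100 <= 1.2 * 110
            System.out.println(fitsRatio(300, List.of(60L, 50L), 1.2)); // false: 300 >  1.2 * 110
        }
    }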
2024-11-20T04:27:53,272 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T04:27:53,273 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740 2024-11-20T04:27:53,275 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740 2024-11-20T04:27:53,278 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T04:27:53,278 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T04:27:53,279 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T04:27:53,281 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T04:27:53,283 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862928, jitterRate=0.09727056324481964}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T04:27:53,283 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T04:27:53,285 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732076873257Writing region info on filesystem at 1732076873258 (+1 ms)Initializing all the Stores at 1732076873260 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076873260Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076873260Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076873260Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076873260Cleaning up temporary data from old regions at 1732076873278 (+18 ms)Running coprocessor post-open hooks at 1732076873283 (+5 ms)Region opened successfully at 1732076873285 (+2 ms) 2024-11-20T04:27:53,292 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732076873200 2024-11-20T04:27:53,308 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c2a32e16c274,35243,1732076871487 2024-11-20T04:27:53,309 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T04:27:53,309 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T04:27:53,311 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2a32e16c274,35243,1732076871487, state=OPEN 2024-11-20T04:27:53,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T04:27:53,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T04:27:53,317 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:27:53,317 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:27:53,317 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c2a32e16c274,35243,1732076871487 2024-11-20T04:27:53,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T04:27:53,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c2a32e16c274,35243,1732076871487 in 288 msec 2024-11-20T04:27:53,331 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T04:27:53,331 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 746 msec 2024-11-20T04:27:53,333 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:27:53,333 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T04:27:53,358 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T04:27:53,359 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2a32e16c274,35243,1732076871487, seqNum=-1] 2024-11-20T04:27:53,386 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T04:27:53,388 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36795, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T04:27:53,411 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0540 sec 2024-11-20T04:27:53,411 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732076873411, completionTime=-1 2024-11-20T04:27:53,414 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T04:27:53,415 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T04:27:53,446 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T04:27:53,446 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732076933446 2024-11-20T04:27:53,447 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732076993446 2024-11-20T04:27:53,447 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 32 msec 2024-11-20T04:27:53,449 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,43451,1732076870808-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:27:53,450 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,43451,1732076870808-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:27:53,450 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,43451,1732076870808-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:27:53,452 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c2a32e16c274:43451, period=300000, unit=MILLISECONDS is enabled. 
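Once InitMetaProcedure finishes, the 'default' and 'hbase' namespaces it announces above are visible through the client Admin API. A minimal client sketch, assuming a reachable cluster and the standard HBase client configuration on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespacesSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                    System.out.println(ns.getName());           // expect "default" and "hbase" on a fresh cluster
                }
            }
        }
    }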
2024-11-20T04:27:53,452 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T04:27:53,453 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T04:27:53,459 DEBUG [master/c2a32e16c274:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T04:27:53,484 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.856sec 2024-11-20T04:27:53,486 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T04:27:53,487 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T04:27:53,488 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T04:27:53,489 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T04:27:53,489 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T04:27:53,490 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,43451,1732076870808-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T04:27:53,490 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,43451,1732076870808-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T04:27:53,499 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T04:27:53,500 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T04:27:53,501 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,43451,1732076870808-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T04:27:53,591 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4731d90b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:27:53,594 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-20T04:27:53,594 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-20T04:27:53,599 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c2a32e16c274,43451,-1 for getting cluster id 2024-11-20T04:27:53,603 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T04:27:53,613 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '07532208-c17a-4cbd-98c8-f5b0f9af7814' 2024-11-20T04:27:53,616 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T04:27:53,616 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "07532208-c17a-4cbd-98c8-f5b0f9af7814" 2024-11-20T04:27:53,616 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29b07e58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:27:53,616 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c2a32e16c274,43451,-1] 2024-11-20T04:27:53,619 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T04:27:53,621 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:27:53,622 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53454, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T04:27:53,626 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2eb3700d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:27:53,627 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T04:27:53,635 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2a32e16c274,35243,1732076871487, seqNum=-1] 2024-11-20T04:27:53,635 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T04:27:53,638 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40792, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T04:27:53,659 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=c2a32e16c274,43451,1732076870808 2024-11-20T04:27:53,659 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:27:53,667 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T04:27:53,671 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T04:27:53,675 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is c2a32e16c274,43451,1732076870808 2024-11-20T04:27:53,679 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3a95d6f2 2024-11-20T04:27:53,680 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T04:27:53,683 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53456, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T04:27:53,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43451 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T04:27:53,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43451 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
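The "set balanceSwitch=false" line above is the test turning the master's load balancer off before creating its table. The same toggle is available to an ordinary client through the Admin API; a minimal sketch, with the cluster address taken from the ambient configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class BalancerSwitchSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Disable the balancer; the second argument asks the call to wait for
                // any balance run that is currently in flight.
                boolean previous = admin.balancerSwitch(false, true);
                System.out.println("balancer was previously " + (previous ? "on" : "off"));
            }
        }
    }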
2024-11-20T04:27:53,688 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43451 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T04:27:53,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43451 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-20T04:27:53,698 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T04:27:53,700 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43451 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-20T04:27:53,700 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:53,702 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T04:27:53,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T04:27:53,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741835_1011 (size=389) 2024-11-20T04:27:53,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741835_1011 (size=389) 2024-11-20T04:27:53,757 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ec0fb3462118dd12411f23256603f7b1, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c 2024-11-20T04:27:53,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741836_1012 (size=72) 2024-11-20T04:27:53,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741836_1012 (size=72) 2024-11-20T04:27:53,769 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:27:53,770 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing ec0fb3462118dd12411f23256603f7b1, disabling compactions & flushes 2024-11-20T04:27:53,770 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 2024-11-20T04:27:53,770 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 2024-11-20T04:27:53,770 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. after waiting 0 ms 2024-11-20T04:27:53,770 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 2024-11-20T04:27:53,770 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 2024-11-20T04:27:53,770 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for ec0fb3462118dd12411f23256603f7b1: Waiting for close lock at 1732076873769Disabling compacts and flushes for region at 1732076873769Disabling writes for close at 1732076873770 (+1 ms)Writing region close event to WAL at 1732076873770Closed at 1732076873770 2024-11-20T04:27:53,772 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T04:27:53,779 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732076873773"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732076873773"}]},"ts":"1732076873773"} 2024-11-20T04:27:53,786 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
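The create request logged above specifies a single 'info' family (VERSIONS => '1', BLOOMFILTER => 'ROW', 64 KB blocks), and the earlier TableDescriptorChecker warnings show the run also uses a deliberately tiny 786432-byte max file size and 8192-byte memstore flush size. A client-side sketch that builds a comparable descriptor follows; setting the small sizes on the descriptor is for illustration only, since the test may instead set the equivalent "hbase.hregion.*" site configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
                    .setMaxFileSize(786432L)            // MAX_FILESIZE the checker warned about
                    .setMemStoreFlushSize(8192L)        // MEMSTORE_FLUSHSIZE the checker warned about
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("info"))
                        .setMaxVersions(1)              // VERSIONS => '1'
                        .setBloomFilterType(BloomType.ROW)
                        .setBlocksize(64 * 1024)        // BLOCKSIZE => 64 KB
                        .build())
                    .build();
                admin.createTable(td);                  // the master then runs a CreateTableProcedure, as in the log
            }
        }
    }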
2024-11-20T04:27:53,789 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T04:27:53,791 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732076873789"}]},"ts":"1732076873789"} 2024-11-20T04:27:53,817 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-20T04:27:53,820 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ec0fb3462118dd12411f23256603f7b1, ASSIGN}] 2024-11-20T04:27:53,823 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ec0fb3462118dd12411f23256603f7b1, ASSIGN 2024-11-20T04:27:53,825 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ec0fb3462118dd12411f23256603f7b1, ASSIGN; state=OFFLINE, location=c2a32e16c274,35243,1732076871487; forceNewPlan=false, retain=false 2024-11-20T04:27:53,976 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ec0fb3462118dd12411f23256603f7b1, regionState=OPENING, regionLocation=c2a32e16c274,35243,1732076871487 2024-11-20T04:27:53,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ec0fb3462118dd12411f23256603f7b1, ASSIGN because future has completed 2024-11-20T04:27:53,982 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec0fb3462118dd12411f23256603f7b1, server=c2a32e16c274,35243,1732076871487}] 2024-11-20T04:27:54,143 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 
2024-11-20T04:27:54,143 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ec0fb3462118dd12411f23256603f7b1, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1.', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:27:54,143 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:27:54,144 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:27:54,144 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:27:54,144 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:27:54,146 INFO [StoreOpener-ec0fb3462118dd12411f23256603f7b1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:27:54,148 INFO [StoreOpener-ec0fb3462118dd12411f23256603f7b1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ec0fb3462118dd12411f23256603f7b1 columnFamilyName info 2024-11-20T04:27:54,148 DEBUG [StoreOpener-ec0fb3462118dd12411f23256603f7b1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:27:54,149 INFO [StoreOpener-ec0fb3462118dd12411f23256603f7b1-1 {}] regionserver.HStore(327): Store=ec0fb3462118dd12411f23256603f7b1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:27:54,150 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:27:54,151 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:27:54,151 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:27:54,152 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:27:54,152 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:27:54,155 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:27:54,158 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:27:54,159 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ec0fb3462118dd12411f23256603f7b1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=875107, jitterRate=0.11275610327720642}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T04:27:54,159 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:27:54,160 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ec0fb3462118dd12411f23256603f7b1: Running coprocessor pre-open hook at 1732076874144Writing region info on filesystem at 1732076874144Initializing all the Stores at 1732076874145 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076874145Cleaning up temporary data from old regions at 1732076874152 (+7 ms)Running coprocessor post-open hooks at 1732076874159 (+7 ms)Region opened successfully at 1732076874160 (+1 ms) 2024-11-20T04:27:54,162 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1., pid=6, masterSystemTime=1732076874136 2024-11-20T04:27:54,165 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 2024-11-20T04:27:54,165 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 2024-11-20T04:27:54,167 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ec0fb3462118dd12411f23256603f7b1, regionState=OPEN, openSeqNum=2, regionLocation=c2a32e16c274,35243,1732076871487 2024-11-20T04:27:54,170 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec0fb3462118dd12411f23256603f7b1, server=c2a32e16c274,35243,1732076871487 because future has completed 2024-11-20T04:27:54,176 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T04:27:54,176 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ec0fb3462118dd12411f23256603f7b1, server=c2a32e16c274,35243,1732076871487 in 190 msec 2024-11-20T04:27:54,181 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T04:27:54,181 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ec0fb3462118dd12411f23256603f7b1, ASSIGN in 356 msec 2024-11-20T04:27:54,182 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T04:27:54,182 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732076874182"}]},"ts":"1732076874182"} 2024-11-20T04:27:54,185 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-20T04:27:54,187 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T04:27:54,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 496 msec 2024-11-20T04:27:58,760 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T04:27:58,804 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T04:27:58,805 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-20T04:28:01,224 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T04:28:01,224 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T04:28:01,226 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-20T04:28:01,226 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-20T04:28:01,227 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T04:28:01,227 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T04:28:01,227 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T04:28:01,227 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T04:28:03,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T04:28:03,793 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-20T04:28:03,795 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-20T04:28:03,802 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-20T04:28:03,803 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 
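The two region-open lines earlier (for hbase:meta and for the test table) report a SteppingSplitPolicy whose desiredMaxFileSize differs from the configured 786432-byte max file size only by the logged jitterRate; the figures are consistent with desired = configured * (1 + jitterRate), truncated to a long. A worked check of the relationship visible in the log, not a copy of HBase's implementation:

    public class SplitJitterSketch {
        public static void main(String[] args) {
            // Reproduce the two desiredMaxFileSize values from the configured max file
            // size (786432 bytes in this run) and the logged jitterRate values.
            long configuredMaxFileSize = 786432L;
            double metaJitter = 0.09727056324481964;
            double tableJitter = 0.11275610327720642;
            System.out.println((long) (configuredMaxFileSize * (1 + metaJitter)));  // 862928, as logged for hbase:meta
            System.out.println((long) (configuredMaxFileSize * (1 + tableJitter))); // 875107, as logged for the test table
        }
    }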
2024-11-20T04:28:03,803 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C35243%2C1732076871487.1732076883803 2024-11-20T04:28:03,813 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:03,813 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:03,813 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:03,814 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:03,814 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:03,814 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076872758 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076883803 2024-11-20T04:28:03,815 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37107:37107),(127.0.0.1/127.0.0.1:34091:34091)] 2024-11-20T04:28:03,816 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076872758 is not closed yet, will try archiving it next time 2024-11-20T04:28:03,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741833_1009 (size=451) 2024-11-20T04:28:03,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741833_1009 (size=451) 2024-11-20T04:28:03,819 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076872758 to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/oldWALs/c2a32e16c274%2C35243%2C1732076871487.1732076872758 2024-11-20T04:28:03,826 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1., hostname=c2a32e16c274,35243,1732076871487, seqNum=2] 2024-11-20T04:28:15,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] regionserver.HRegion(8855): Flush requested on ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:28:15,862 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ec0fb3462118dd12411f23256603f7b1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T04:28:15,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/b39efcba95e94de994075b0c62897dd8 is 1080, key is row0001/info:/1732076883828/Put/seqid=0 2024-11-20T04:28:15,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741838_1014 (size=12509) 2024-11-20T04:28:15,945 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741838_1014 (size=12509) 2024-11-20T04:28:15,946 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/b39efcba95e94de994075b0c62897dd8 2024-11-20T04:28:15,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/b39efcba95e94de994075b0c62897dd8 as hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/b39efcba95e94de994075b0c62897dd8 2024-11-20T04:28:16,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/b39efcba95e94de994075b0c62897dd8, entries=7, sequenceid=11, filesize=12.2 K 2024-11-20T04:28:16,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ec0fb3462118dd12411f23256603f7b1 in 149ms, sequenceid=11, compaction requested=false 2024-11-20T04:28:16,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ec0fb3462118dd12411f23256603f7b1: 2024-11-20T04:28:19,919 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
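The flush sequence logged above — the new HFile written under the region's .tmp directory, then "Committing ... as ..." into the store directory before the flush is reported finished at sequenceid=11 — is the usual write-temporary-then-move-into-place pattern. Below is a minimal, generic sketch of that pattern using java.nio.file; the paths and helper names are hypothetical and this is not the HRegionFileSystem code itself.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.*;

    public class TmpThenCommit {
        // Write the data to a temporary file first, then move it into its final
        // location in one step, so readers never observe a half-written file.
        static Path commitFlush(Path storeDir, String fileName, byte[] data) throws IOException {
            Path tmpDir = storeDir.resolve(".tmp");
            Files.createDirectories(tmpDir);
            Path tmp = tmpDir.resolve(fileName);
            Files.write(tmp, data);                       // flush output lands under .tmp first
            Path finalPath = storeDir.resolve(fileName);  // final location in the store directory
            try {
                // ATOMIC_MOVE is preferred; fall back to a plain move where unsupported.
                return Files.move(tmp, finalPath, StandardCopyOption.ATOMIC_MOVE);
            } catch (AtomicMoveNotSupportedException e) {
                return Files.move(tmp, finalPath, StandardCopyOption.REPLACE_EXISTING);
            }
        }

        public static void main(String[] args) throws IOException {
            Path store = Files.createTempDirectory("store-info");
            Path committed = commitFlush(store, "example-hfile", "demo".getBytes(StandardCharsets.UTF_8));
            System.out.println("Committed " + committed);
        }
    }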
2024-11-20T04:28:23,871 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C35243%2C1732076871487.1732076903871 2024-11-20T04:28:24,079 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK], DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK]] 2024-11-20T04:28:24,080 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:24,080 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:24,080 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:24,080 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:24,080 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:24,080 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076883803 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076903871 2024-11-20T04:28:24,081 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34091:34091),(127.0.0.1/127.0.0.1:37107:37107)] 2024-11-20T04:28:24,082 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076883803 is not closed yet, will try archiving it next time 2024-11-20T04:28:24,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741837_1013 (size=12399) 2024-11-20T04:28:24,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741837_1013 (size=12399) 2024-11-20T04:28:24,285 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:26,489 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:28,693 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:30,897 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:30,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] regionserver.HRegion(8855): Flush requested on ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:28:30,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ec0fb3462118dd12411f23256603f7b1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T04:28:31,100 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:31,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/f35405270db44a8d9ec6b1b7b797fa1e is 1080, key is row0008/info:/1732076897860/Put/seqid=0 2024-11-20T04:28:31,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741840_1016 (size=12509) 2024-11-20T04:28:31,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741840_1016 (size=12509) 2024-11-20T04:28:31,117 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/f35405270db44a8d9ec6b1b7b797fa1e 2024-11-20T04:28:31,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/f35405270db44a8d9ec6b1b7b797fa1e as hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/f35405270db44a8d9ec6b1b7b797fa1e 2024-11-20T04:28:31,139 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/f35405270db44a8d9ec6b1b7b797fa1e, entries=7, sequenceid=21, filesize=12.2 K 2024-11-20T04:28:31,341 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:31,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ec0fb3462118dd12411f23256603f7b1 in 
444ms, sequenceid=21, compaction requested=false 2024-11-20T04:28:31,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ec0fb3462118dd12411f23256603f7b1: 2024-11-20T04:28:31,342 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-20T04:28:31,342 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:28:31,342 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/b39efcba95e94de994075b0c62897dd8 because midkey is the same as first or last row 2024-11-20T04:28:33,101 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:33,924 INFO [master/c2a32e16c274:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T04:28:33,924 INFO [master/c2a32e16c274:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-20T04:28:35,305 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:35,307 WARN [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:35,308 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C35243%2C1732076871487:(num 1732076903871) roll requested 2024-11-20T04:28:35,309 INFO [regionserver/c2a32e16c274:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C35243%2C1732076871487.1732076915309 2024-11-20T04:28:35,517 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:35,517 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:35,518 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:35,518 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:35,518 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:35,518 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
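The WARN above ("Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5") and the time-based variant that appears further down ("time=5005 ms, threshold=5000 ms") describe the same idea: track WAL sync latencies and request a roll once syncs look persistently or extremely slow. The following is a simplified illustration of that kind of check in plain Java; the class, field names, and threshold values are hypothetical stand-ins mirroring the numbers visible in the log, not the FSHLog implementation.

    public class SlowSyncTracker {
        static final long SLOW_SYNC_MS = 100;        // a sync slower than this counts as "slow"
        static final long ROLL_ON_SYNC_MS = 5000;    // a single sync slower than this requests a roll
        static final int SLOW_SYNC_ROLL_COUNT = 5;   // this many recent slow syncs also request a roll

        private int recentSlowSyncs = 0;
        private boolean rollRequested = false;

        // Called after each WAL sync with the time it took.
        void onSyncCompleted(long costMs) {
            if (costMs > SLOW_SYNC_MS) {
                recentSlowSyncs++;
                System.out.println("Slow sync cost: " + costMs + " ms");
            }
            if (costMs > ROLL_ON_SYNC_MS) {
                requestRoll("time=" + costMs + " ms, threshold=" + ROLL_ON_SYNC_MS + " ms");
            } else if (recentSlowSyncs > SLOW_SYNC_ROLL_COUNT) {
                requestRoll("count=" + recentSlowSyncs + ", threshold=" + SLOW_SYNC_ROLL_COUNT);
            }
        }

        private void requestRoll(String reason) {
            if (!rollRequested) {
                rollRequested = true;
                System.out.println("Requesting log roll because we exceeded slow sync threshold; " + reason);
            }
        }

        // After a roll completes, the counters start over for the new writer.
        void onLogRolled() {
            recentSlowSyncs = 0;
            rollRequested = false;
        }

        public static void main(String[] args) {
            SlowSyncTracker t = new SlowSyncTracker();
            for (int i = 0; i < 8; i++) t.onSyncCompleted(201);  // repeated ~201 ms syncs, as in the log
            t.onLogRolled();
            t.onSyncCompleted(5005);                              // one very slow sync also triggers a roll
        }
    }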
2024-11-20T04:28:35,518 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076903871 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076915309 2024-11-20T04:28:35,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741839_1015 (size=7739) 2024-11-20T04:28:35,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741839_1015 (size=7739) 2024-11-20T04:28:35,524 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34091:34091),(127.0.0.1/127.0.0.1:37107:37107)] 2024-11-20T04:28:35,524 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076903871 is not closed yet, will try archiving it next time 2024-11-20T04:28:35,524 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076883803 to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/oldWALs/c2a32e16c274%2C35243%2C1732076871487.1732076883803 2024-11-20T04:28:37,509 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:39,144 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ec0fb3462118dd12411f23256603f7b1, had cached 0 bytes from a total of 25018 2024-11-20T04:28:39,714 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:41,918 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:44,123 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], 
DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:46,125 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-20T04:28:46,125 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C35243%2C1732076871487.1732076926125 2024-11-20T04:28:49,919 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T04:28:51,134 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:51,136 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK], DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK]] 2024-11-20T04:28:51,136 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C35243%2C1732076871487:(num 1732076926125) roll requested 2024-11-20T04:28:51,137 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:51,137 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:51,137 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:51,137 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:51,137 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:28:51,137 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076915309 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076926125 2024-11-20T04:28:51,138 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37107:37107),(127.0.0.1/127.0.0.1:34091:34091)] 2024-11-20T04:28:51,138 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076915309 is not closed yet, will try archiving it next time 2024-11-20T04:28:51,139 INFO [regionserver/c2a32e16c274:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C35243%2C1732076871487.1732076931138 2024-11-20T04:28:51,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741841_1017 (size=4753) 2024-11-20T04:28:51,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741841_1017 (size=4753) 2024-11-20T04:28:56,142 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK], DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK]] 2024-11-20T04:28:56,142 WARN [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK], DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK]] 2024-11-20T04:28:56,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] regionserver.HRegion(8855): Flush requested on ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:28:56,143 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ec0fb3462118dd12411f23256603f7b1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T04:28:56,150 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK], DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK]] 2024-11-20T04:28:56,150 WARN [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK], DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK]] 2024-11-20T04:28:58,143 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-20T04:29:01,145 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK], DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK]] 2024-11-20T04:29:01,145 WARN [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK], DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK]] 2024-11-20T04:29:01,146 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:01,146 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:01,147 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:01,147 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:01,148 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:01,148 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076926125 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076931138 2024-11-20T04:29:01,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741842_1018 (size=1569) 2024-11-20T04:29:01,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741842_1018 (size=1569) 2024-11-20T04:29:01,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/63bb8e69e8354c4183dc6542f4aa4072 is 1080, key is row0015/info:/1732076912900/Put/seqid=0 2024-11-20T04:29:01,168 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37107:37107),(127.0.0.1/127.0.0.1:34091:34091)] 2024-11-20T04:29:01,169 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076926125 is not closed yet, will try archiving it next time 2024-11-20T04:29:01,169 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C35243%2C1732076871487:(num 1732076931138) roll requested 2024-11-20T04:29:01,169 INFO [regionserver/c2a32e16c274:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C35243%2C1732076871487.1732076941169 2024-11-20T04:29:01,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741844_1020 (size=12509) 2024-11-20T04:29:01,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741844_1020 (size=12509) 2024-11-20T04:29:01,208 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/63bb8e69e8354c4183dc6542f4aa4072 2024-11-20T04:29:01,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/63bb8e69e8354c4183dc6542f4aa4072 as hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/63bb8e69e8354c4183dc6542f4aa4072 2024-11-20T04:29:01,235 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/63bb8e69e8354c4183dc6542f4aa4072, entries=7, sequenceid=31, filesize=12.2 K 2024-11-20T04:29:06,190 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK], DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK]] 2024-11-20T04:29:06,190 WARN [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK], DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK]] 2024-11-20T04:29:06,237 INFO [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK], DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK]] 2024-11-20T04:29:06,237 WARN [FSHLog-0-hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c-prefix:c2a32e16c274,35243,1732076871487 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34821,DS-d07dc207-e9f5-471c-a3b0-2fc1117c3c8f,DISK], DatanodeInfoWithStorage[127.0.0.1:44703,DS-5728b753-b1f4-44b5-879e-b8c868801182,DISK]] 2024-11-20T04:29:06,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ec0fb3462118dd12411f23256603f7b1 in 10094ms, sequenceid=31, compaction requested=true 2024-11-20T04:29:06,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ec0fb3462118dd12411f23256603f7b1: 2024-11-20T04:29:06,237 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,237 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,237 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-20T04:29:06,237 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,237 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:06,237 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,237 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/b39efcba95e94de994075b0c62897dd8 because midkey is the same as first or last row 2024-11-20T04:29:06,237 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,238 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076931138 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076941169 2024-11-20T04:29:06,238 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:37107:37107),(127.0.0.1/127.0.0.1:34091:34091)] 2024-11-20T04:29:06,239 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076931138 is not closed yet, will try archiving it next time 2024-11-20T04:29:06,239 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C35243%2C1732076871487:(num 1732076946239) roll requested 2024-11-20T04:29:06,239 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076903871 to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/oldWALs/c2a32e16c274%2C35243%2C1732076871487.1732076903871 2024-11-20T04:29:06,239 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C35243%2C1732076871487.1732076946239 2024-11-20T04:29:06,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ec0fb3462118dd12411f23256603f7b1:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T04:29:06,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741843_1019 (size=438) 2024-11-20T04:29:06,245 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076915309 to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/oldWALs/c2a32e16c274%2C35243%2C1732076871487.1732076915309 2024-11-20T04:29:06,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741843_1019 (size=438) 2024-11-20T04:29:06,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:29:06,246 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T04:29:06,247 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076926125 to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/oldWALs/c2a32e16c274%2C35243%2C1732076871487.1732076926125 2024-11-20T04:29:06,249 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T04:29:06,250 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.HStore(1541): ec0fb3462118dd12411f23256603f7b1/info is initiating minor compaction (all files) 2024-11-20T04:29:06,251 INFO [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ec0fb3462118dd12411f23256603f7b1/info in 
TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 2024-11-20T04:29:06,251 INFO [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/b39efcba95e94de994075b0c62897dd8, hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/f35405270db44a8d9ec6b1b7b797fa1e, hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/63bb8e69e8354c4183dc6542f4aa4072] into tmpdir=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp, totalSize=36.6 K 2024-11-20T04:29:06,252 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] compactions.Compactor(225): Compacting b39efcba95e94de994075b0c62897dd8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732076883828 2024-11-20T04:29:06,253 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] compactions.Compactor(225): Compacting f35405270db44a8d9ec6b1b7b797fa1e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732076897860 2024-11-20T04:29:06,254 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] compactions.Compactor(225): Compacting 63bb8e69e8354c4183dc6542f4aa4072, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732076912900 2024-11-20T04:29:06,255 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,256 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,256 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,256 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,257 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,257 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076941169 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076946239 2024-11-20T04:29:06,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741845_1021 (size=93) 2024-11-20T04:29:06,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741845_1021 (size=93) 2024-11-20T04:29:06,260 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076931138 is not closed yet, will try archiving it next time 2024-11-20T04:29:06,260 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076941169 to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/oldWALs/c2a32e16c274%2C35243%2C1732076871487.1732076941169 2024-11-20T04:29:06,271 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37107:37107),(127.0.0.1/127.0.0.1:34091:34091)] 2024-11-20T04:29:06,271 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076931138 is not closed yet, will try archiving it next time 2024-11-20T04:29:06,271 INFO [regionserver/c2a32e16c274:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C35243%2C1732076871487.1732076946271 2024-11-20T04:29:06,282 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,282 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,283 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,283 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,283 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:06,286 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076946239 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076946271 2024-11-20T04:29:06,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741846_1022 (size=1258) 2024-11-20T04:29:06,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741846_1022 (size=1258) 2024-11-20T04:29:06,288 INFO [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ec0fb3462118dd12411f23256603f7b1#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T04:29:06,289 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076931138 is not closed yet, will try archiving it next time 2024-11-20T04:29:06,289 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/27e04aab785b472d9d1f4c825f051266 is 1080, key is row0001/info:/1732076883828/Put/seqid=0 2024-11-20T04:29:06,295 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34091:34091),(127.0.0.1/127.0.0.1:37107:37107)] 2024-11-20T04:29:06,295 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076931138 is not closed yet, will try archiving it next time 2024-11-20T04:29:06,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741848_1024 (size=27710) 2024-11-20T04:29:06,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741848_1024 (size=27710) 2024-11-20T04:29:06,310 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/27e04aab785b472d9d1f4c825f051266 as hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/27e04aab785b472d9d1f4c825f051266 2024-11-20T04:29:06,326 INFO [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ec0fb3462118dd12411f23256603f7b1/info of ec0fb3462118dd12411f23256603f7b1 into 27e04aab785b472d9d1f4c825f051266(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
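The PressureAwareThroughputController line above reports the compaction's average throughput (7.18 MB/second, slept 0 times) against a 50.00 MB/second limit. A cap like that can be enforced by sleeping whenever the bytes written so far get ahead of the allowed rate. The snippet below is a generic, hypothetical sketch of such a limiter, not the HBase controller.

    public class ThroughputLimiter {
        private final double maxBytesPerSec;
        private final long startNanos = System.nanoTime();
        private long bytesWritten = 0;
        private int sleeps = 0;

        ThroughputLimiter(double maxBytesPerSec) {
            this.maxBytesPerSec = maxBytesPerSec;
        }

        // Record a chunk of compaction output and sleep if we are ahead of the allowed rate.
        void control(long bytes) throws InterruptedException {
            bytesWritten += bytes;
            double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
            double earliestAllowedSec = bytesWritten / maxBytesPerSec;
            long sleepMs = (long) ((earliestAllowedSec - elapsedSec) * 1000);
            if (sleepMs > 0) {
                sleeps++;
                Thread.sleep(sleepMs);
            }
        }

        String summary() {
            double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
            double mbPerSec = bytesWritten / elapsedSec / (1024 * 1024);
            return String.format("average throughput is %.2f MB/second, slept %d time(s)", mbPerSec, sleeps);
        }

        public static void main(String[] args) throws InterruptedException {
            ThroughputLimiter limiter = new ThroughputLimiter(50 * 1024 * 1024); // 50 MB/second cap
            for (int i = 0; i < 10; i++) {
                limiter.control(1024 * 1024); // pretend we just wrote 1 MB of compacted data
            }
            System.out.println(limiter.summary());
        }
    }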
2024-11-20T04:29:06,326 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ec0fb3462118dd12411f23256603f7b1: 2024-11-20T04:29:06,328 INFO [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1., storeName=ec0fb3462118dd12411f23256603f7b1/info, priority=13, startTime=1732076946239; duration=0sec 2024-11-20T04:29:06,328 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-20T04:29:06,328 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:06,328 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/27e04aab785b472d9d1f4c825f051266 because midkey is the same as first or last row 2024-11-20T04:29:06,329 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-20T04:29:06,329 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:06,329 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/27e04aab785b472d9d1f4c825f051266 because midkey is the same as first or last row 2024-11-20T04:29:06,329 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-20T04:29:06,329 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:06,329 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/27e04aab785b472d9d1f4c825f051266 because midkey is the same as first or last row 2024-11-20T04:29:06,329 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:29:06,329 DEBUG [RS:0;c2a32e16c274:35243-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ec0fb3462118dd12411f23256603f7b1:info 2024-11-20T04:29:06,646 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/WALs/c2a32e16c274,35243,1732076871487/c2a32e16c274%2C35243%2C1732076871487.1732076931138 to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/oldWALs/c2a32e16c274%2C35243%2C1732076871487.1732076931138 
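The repeated DEBUG lines above show the split check that runs after each flush and compaction: the policy decides the store is big enough (sumSize 27.1 K vs sizeToCheck 16.0 K) but then refuses to split because the candidate HFile's midkey equals its first or last row, so there is no usable split point. The following is a simplified sketch of that decision with hypothetical types and values; it is not the IncreasingToUpperBoundRegionSplitPolicy code.

    import java.util.Arrays;
    import java.util.Optional;

    public class SplitCheckSketch {
        // Minimal stand-in for a store file: total size plus first/mid/last row keys.
        record StoreFile(long sizeBytes, String firstRow, String midRow, String lastRow) {}

        // Should-split test: total store size must exceed the configured check size.
        static boolean shouldSplit(StoreFile[] files, long sizeToCheck) {
            long sumSize = Arrays.stream(files).mapToLong(StoreFile::sizeBytes).sum();
            System.out.printf("sumSize=%d, sizeToCheck=%d%n", sumSize, sizeToCheck);
            return sumSize > sizeToCheck;
        }

        // A usable split point is the midkey of the largest file, unless that midkey
        // equals the file's first or last row (splitting there would leave an empty daughter).
        static Optional<String> splitPoint(StoreFile largest) {
            if (largest.midRow().equals(largest.firstRow()) || largest.midRow().equals(largest.lastRow())) {
                System.out.println("cannot split because midkey is the same as first or last row");
                return Optional.empty();
            }
            return Optional.of(largest.midRow());
        }

        public static void main(String[] args) {
            StoreFile onlyFile = new StoreFile(27_700, "row0001", "row0001", "row0021");
            if (shouldSplit(new StoreFile[] { onlyFile }, 16_384)) {
                splitPoint(onlyFile).ifPresentOrElse(
                    p -> System.out.println("split at " + p),
                    () -> System.out.println("split skipped"));
            }
        }
    }

In this toy setup the single file's midkey equals its first row, so the check prints the same "cannot split" outcome seen in the log even though the size test passes.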
2024-11-20T04:29:18,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] regionserver.HRegion(8855): Flush requested on ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:29:18,297 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ec0fb3462118dd12411f23256603f7b1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T04:29:18,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/567e7665de104988a6f3229cd8fd0bd6 is 1080, key is row0022/info:/1732076946273/Put/seqid=0 2024-11-20T04:29:18,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741849_1025 (size=12509) 2024-11-20T04:29:18,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741849_1025 (size=12509) 2024-11-20T04:29:18,314 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/567e7665de104988a6f3229cd8fd0bd6 2024-11-20T04:29:18,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/567e7665de104988a6f3229cd8fd0bd6 as hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/567e7665de104988a6f3229cd8fd0bd6 2024-11-20T04:29:18,333 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/567e7665de104988a6f3229cd8fd0bd6, entries=7, sequenceid=42, filesize=12.2 K 2024-11-20T04:29:18,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ec0fb3462118dd12411f23256603f7b1 in 39ms, sequenceid=42, compaction requested=false 2024-11-20T04:29:18,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ec0fb3462118dd12411f23256603f7b1: 2024-11-20T04:29:18,335 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-20T04:29:18,335 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:18,335 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/27e04aab785b472d9d1f4c825f051266 because midkey is the same as first or last row 2024-11-20T04:29:19,920 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might 
because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T04:29:24,144 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ec0fb3462118dd12411f23256603f7b1, had cached 0 bytes from a total of 40219 2024-11-20T04:29:26,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T04:29:26,308 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T04:29:26,309 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:29:26,314 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:29:26,315 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:29:26,315 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T04:29:26,315 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T04:29:26,315 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=626812342, stopped=false 2024-11-20T04:29:26,316 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c2a32e16c274,43451,1732076870808 2024-11-20T04:29:26,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:29:26,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:26,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:29:26,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:26,318 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T04:29:26,319 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:29:26,319 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T04:29:26,319 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:29:26,319 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:29:26,319 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:29:26,320 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c2a32e16c274,35243,1732076871487' ***** 2024-11-20T04:29:26,320 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T04:29:26,320 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T04:29:26,320 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T04:29:26,320 INFO [RS:0;c2a32e16c274:35243 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T04:29:26,321 INFO [RS:0;c2a32e16c274:35243 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T04:29:26,321 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(3091): Received CLOSE for ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:29:26,321 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(959): stopping server c2a32e16c274,35243,1732076871487 2024-11-20T04:29:26,321 INFO [RS:0;c2a32e16c274:35243 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:29:26,322 INFO [RS:0;c2a32e16c274:35243 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c2a32e16c274:35243. 2024-11-20T04:29:26,322 DEBUG [RS:0;c2a32e16c274:35243 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:29:26,322 DEBUG [RS:0;c2a32e16c274:35243 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:29:26,322 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T04:29:26,322 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T04:29:26,322 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
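The two call stacks above both end in AsyncConnectionImpl.close() and originate in the test's teardown: JUnit's RunAfters invokes AbstractTestLogRolling.tearDown(), which calls HBaseTestingUtil.shutdownMiniCluster(), which shuts down the LocalHBaseCluster and lets the master and region server close their cluster connections. A minimal sketch of that JUnit 4 teardown pattern follows; shutdownMiniCluster() is the call visible in the trace, while the TEST_UTIL field name, startMiniCluster(1) and the placeholder test are assumed boilerplate.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterTeardownSketch {
  // Hypothetical utility instance; real tests typically keep one per test class.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Assumed setup: start a single-process cluster (mini ZK, mini DFS, master, one region server).
    TEST_UTIL.startMiniCluster(1);
  }

  @After
  public void tearDown() throws Exception {
    // The call that appears in the stack trace above:
    // HBaseTestingUtil.shutdownMiniCluster -> SingleProcessHBaseCluster.shutdown -> HMaster.shutdown.
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void placeholder() {
    // Placeholder so the class is a complete JUnit 4 test.
  }
}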
2024-11-20T04:29:26,322 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ec0fb3462118dd12411f23256603f7b1, disabling compactions & flushes 2024-11-20T04:29:26,322 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 2024-11-20T04:29:26,322 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T04:29:26,322 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 2024-11-20T04:29:26,322 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. after waiting 0 ms 2024-11-20T04:29:26,322 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 2024-11-20T04:29:26,322 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ec0fb3462118dd12411f23256603f7b1 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-20T04:29:26,322 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-20T04:29:26,323 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T04:29:26,323 DEBUG [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(1325): Online Regions={ec0fb3462118dd12411f23256603f7b1=TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1., 1588230740=hbase:meta,,1.1588230740} 2024-11-20T04:29:26,323 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T04:29:26,323 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T04:29:26,323 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T04:29:26,323 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T04:29:26,323 DEBUG [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ec0fb3462118dd12411f23256603f7b1 2024-11-20T04:29:26,323 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-20T04:29:26,333 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/c836abf1dde84b54a067512502fe0838 is 1080, key is row0029/info:/1732076960298/Put/seqid=0 2024-11-20T04:29:26,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741850_1026 (size=8193) 2024-11-20T04:29:26,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741850_1026 (size=8193) 2024-11-20T04:29:26,348 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/c836abf1dde84b54a067512502fe0838 2024-11-20T04:29:26,355 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/.tmp/info/a33c5e294f4740a5bd74f5c58446d1fb is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1./info:regioninfo/1732076874166/Put/seqid=0 2024-11-20T04:29:26,363 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/.tmp/info/c836abf1dde84b54a067512502fe0838 as hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/c836abf1dde84b54a067512502fe0838 2024-11-20T04:29:26,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741851_1027 (size=7016) 2024-11-20T04:29:26,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741851_1027 (size=7016) 2024-11-20T04:29:26,365 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/.tmp/info/a33c5e294f4740a5bd74f5c58446d1fb 2024-11-20T04:29:26,373 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/c836abf1dde84b54a067512502fe0838, entries=3, sequenceid=48, filesize=8.0 K 2024-11-20T04:29:26,374 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for ec0fb3462118dd12411f23256603f7b1 in 52ms, sequenceid=48, compaction requested=true 2024-11-20T04:29:26,375 DEBUG 
[StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/b39efcba95e94de994075b0c62897dd8, hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/f35405270db44a8d9ec6b1b7b797fa1e, hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/63bb8e69e8354c4183dc6542f4aa4072] to archive 2024-11-20T04:29:26,379 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T04:29:26,382 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/b39efcba95e94de994075b0c62897dd8 to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/b39efcba95e94de994075b0c62897dd8 2024-11-20T04:29:26,384 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/f35405270db44a8d9ec6b1b7b797fa1e to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/f35405270db44a8d9ec6b1b7b797fa1e 2024-11-20T04:29:26,387 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/63bb8e69e8354c4183dc6542f4aa4072 to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/info/63bb8e69e8354c4183dc6542f4aa4072 2024-11-20T04:29:26,391 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/.tmp/ns/1f8ea812c7cc4b41875b9f2a5fb8e1d2 is 43, key is default/ns:d/1732076873392/Put/seqid=0 2024-11-20T04:29:26,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741852_1028 (size=5153) 2024-11-20T04:29:26,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741852_1028 (size=5153) 
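The HRegionFileSystem(442) "Committing <tmp file> as <store file>" lines above describe the flush commit: the new HFile is first written under the store's .tmp directory and then renamed into the live column-family directory, after which HFileArchiver moves the superseded compacted files under /archive (the "Archived from FileableStoreFile" entries). A simplified illustration of that write-to-tmp-then-rename idea using the plain Hadoop FileSystem API follows; the paths and configuration are placeholders, and the real HBase code performs additional checks this sketch omits.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder configuration; the cluster above runs on hdfs://localhost:35115.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/data/default/SomeTable/someregion/.tmp/newfile");
    Path committed = new Path("/data/default/SomeTable/someregion/info/newfile");

    // 1) Write the new file where readers never look.
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.writeBytes("placeholder cell data");
    }

    // 2) Rename it into the live store directory; this is the
    //    "Committing <tmp> as <store path>" step in the log above.
    if (!fs.rename(tmp, committed)) {
      throw new IllegalStateException("rename failed: " + tmp + " -> " + committed);
    }
  }
}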
2024-11-20T04:29:26,401 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/.tmp/ns/1f8ea812c7cc4b41875b9f2a5fb8e1d2 2024-11-20T04:29:26,402 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c2a32e16c274:43451 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] 
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-20T04:29:26,407 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b39efcba95e94de994075b0c62897dd8=12509, f35405270db44a8d9ec6b1b7b797fa1e=12509, 63bb8e69e8354c4183dc6542f4aa4072=12509] 2024-11-20T04:29:26,421 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/default/TestLogRolling-testSlowSyncLogRolling/ec0fb3462118dd12411f23256603f7b1/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-20T04:29:26,430 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 2024-11-20T04:29:26,430 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ec0fb3462118dd12411f23256603f7b1: Waiting for close lock at 1732076966322Running coprocessor pre-close hooks at 1732076966322Disabling compacts and flushes for region at 1732076966322Disabling writes for close at 1732076966322Obtaining lock to block concurrent updates at 1732076966322Preparing flush snapshotting stores in ec0fb3462118dd12411f23256603f7b1 at 1732076966322Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732076966323 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 
at 1732076966324 (+1 ms)Flushing ec0fb3462118dd12411f23256603f7b1/info: creating writer at 1732076966324Flushing ec0fb3462118dd12411f23256603f7b1/info: appending metadata at 1732076966332 (+8 ms)Flushing ec0fb3462118dd12411f23256603f7b1/info: closing flushed file at 1732076966332Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@588fc44a: reopening flushed file at 1732076966362 (+30 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for ec0fb3462118dd12411f23256603f7b1 in 52ms, sequenceid=48, compaction requested=true at 1732076966374 (+12 ms)Writing region close event to WAL at 1732076966409 (+35 ms)Running coprocessor post-close hooks at 1732076966422 (+13 ms)Closed at 1732076966430 (+8 ms) 2024-11-20T04:29:26,431 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732076873684.ec0fb3462118dd12411f23256603f7b1. 2024-11-20T04:29:26,443 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/.tmp/table/4f06f903444141a0b52fb4c11af40721 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732076874182/Put/seqid=0 2024-11-20T04:29:26,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741853_1029 (size=5396) 2024-11-20T04:29:26,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741853_1029 (size=5396) 2024-11-20T04:29:26,466 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/.tmp/table/4f06f903444141a0b52fb4c11af40721 2024-11-20T04:29:26,476 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/.tmp/info/a33c5e294f4740a5bd74f5c58446d1fb as hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/info/a33c5e294f4740a5bd74f5c58446d1fb 2024-11-20T04:29:26,484 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/info/a33c5e294f4740a5bd74f5c58446d1fb, entries=10, sequenceid=11, filesize=6.9 K 2024-11-20T04:29:26,486 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/.tmp/ns/1f8ea812c7cc4b41875b9f2a5fb8e1d2 as hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/ns/1f8ea812c7cc4b41875b9f2a5fb8e1d2 2024-11-20T04:29:26,494 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/ns/1f8ea812c7cc4b41875b9f2a5fb8e1d2, entries=2, sequenceid=11, filesize=5.0 K 2024-11-20T04:29:26,495 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/.tmp/table/4f06f903444141a0b52fb4c11af40721 as hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/table/4f06f903444141a0b52fb4c11af40721 2024-11-20T04:29:26,504 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/table/4f06f903444141a0b52fb4c11af40721, entries=2, sequenceid=11, filesize=5.3 K 2024-11-20T04:29:26,506 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 182ms, sequenceid=11, compaction requested=false 2024-11-20T04:29:26,511 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-20T04:29:26,513 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T04:29:26,513 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T04:29:26,513 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732076966322Running coprocessor pre-close hooks at 1732076966322Disabling compacts and flushes for region at 1732076966323 (+1 ms)Disabling writes for close at 1732076966323Obtaining lock to block concurrent updates at 1732076966323Preparing flush snapshotting stores in 1588230740 at 1732076966323Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732076966324 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732076966325 (+1 ms)Flushing 1588230740/info: creating writer at 1732076966325Flushing 1588230740/info: appending metadata at 1732076966355 (+30 ms)Flushing 1588230740/info: closing flushed file at 1732076966355Flushing 1588230740/ns: creating writer at 1732076966374 (+19 ms)Flushing 1588230740/ns: appending metadata at 1732076966391 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732076966391Flushing 1588230740/table: creating writer at 1732076966413 (+22 ms)Flushing 1588230740/table: appending metadata at 1732076966442 (+29 ms)Flushing 1588230740/table: closing flushed file at 1732076966442Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c24c3aa: reopening flushed file at 1732076966474 (+32 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d9cf91e: reopening flushed file at 1732076966485 (+11 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7991ee89: reopening flushed file at 1732076966494 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 182ms, sequenceid=11, compaction requested=false at 1732076966506 (+12 ms)Writing region close event to WAL at 1732076966507 (+1 ms)Running coprocessor post-close hooks at 1732076966512 (+5 ms)Closed at 1732076966513 (+1 ms) 2024-11-20T04:29:26,513 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T04:29:26,523 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(976): stopping server c2a32e16c274,35243,1732076871487; all regions closed. 2024-11-20T04:29:26,525 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:26,525 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:26,525 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:26,525 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:26,525 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:26,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741834_1010 (size=3066) 2024-11-20T04:29:26,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741834_1010 (size=3066) 2024-11-20T04:29:26,532 DEBUG [RS:0;c2a32e16c274:35243 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/oldWALs 2024-11-20T04:29:26,532 INFO [RS:0;c2a32e16c274:35243 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C35243%2C1732076871487.meta:.meta(num 1732076873231) 2024-11-20T04:29:26,532 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:26,532 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:26,532 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:26,533 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:26,533 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:26,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741847_1023 (size=12695) 2024-11-20T04:29:26,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741847_1023 (size=12695) 2024-11-20T04:29:26,539 DEBUG [RS:0;c2a32e16c274:35243 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/oldWALs 2024-11-20T04:29:26,539 INFO [RS:0;c2a32e16c274:35243 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C35243%2C1732076871487:(num 1732076946271) 2024-11-20T04:29:26,539 DEBUG [RS:0;c2a32e16c274:35243 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:29:26,539 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:29:26,539 INFO [RS:0;c2a32e16c274:35243 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:29:26,540 INFO [RS:0;c2a32e16c274:35243 {}] hbase.ChoreService(370): Chore service for: regionserver/c2a32e16c274:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T04:29:26,540 INFO [RS:0;c2a32e16c274:35243 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:29:26,540 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T04:29:26,541 INFO [RS:0;c2a32e16c274:35243 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35243 2024-11-20T04:29:26,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c2a32e16c274,35243,1732076871487 2024-11-20T04:29:26,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:29:26,555 INFO [RS:0;c2a32e16c274:35243 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T04:29:26,565 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c2a32e16c274,35243,1732076871487] 2024-11-20T04:29:26,574 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c2a32e16c274,35243,1732076871487 already deleted, retry=false 2024-11-20T04:29:26,574 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c2a32e16c274,35243,1732076871487 expired; onlineServers=0 2024-11-20T04:29:26,574 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c2a32e16c274,43451,1732076870808' ***** 2024-11-20T04:29:26,574 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T04:29:26,574 INFO [M:0;c2a32e16c274:43451 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:29:26,574 INFO [M:0;c2a32e16c274:43451 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:29:26,575 DEBUG [M:0;c2a32e16c274:43451 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T04:29:26,575 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T04:29:26,575 DEBUG [M:0;c2a32e16c274:43451 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T04:29:26,575 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732076872491 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732076872491,5,FailOnTimeoutGroup] 2024-11-20T04:29:26,575 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732076872490 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732076872490,5,FailOnTimeoutGroup] 2024-11-20T04:29:26,575 INFO [M:0;c2a32e16c274:43451 {}] hbase.ChoreService(370): Chore service for: master/c2a32e16c274:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T04:29:26,575 INFO [M:0;c2a32e16c274:43451 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:29:26,575 DEBUG [M:0;c2a32e16c274:43451 {}] master.HMaster(1795): Stopping service threads 2024-11-20T04:29:26,575 INFO [M:0;c2a32e16c274:43451 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T04:29:26,575 INFO [M:0;c2a32e16c274:43451 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T04:29:26,576 INFO [M:0;c2a32e16c274:43451 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T04:29:26,576 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T04:29:26,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T04:29:26,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:26,584 DEBUG [M:0;c2a32e16c274:43451 {}] zookeeper.ZKUtil(347): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T04:29:26,584 WARN [M:0;c2a32e16c274:43451 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T04:29:26,585 INFO [M:0;c2a32e16c274:43451 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/.lastflushedseqids 2024-11-20T04:29:26,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741854_1030 (size=130) 2024-11-20T04:29:26,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741854_1030 (size=130) 2024-11-20T04:29:26,598 INFO [M:0;c2a32e16c274:43451 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T04:29:26,598 INFO [M:0;c2a32e16c274:43451 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T04:29:26,599 DEBUG [M:0;c2a32e16c274:43451 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T04:29:26,599 INFO [M:0;c2a32e16c274:43451 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:29:26,599 DEBUG [M:0;c2a32e16c274:43451 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:29:26,599 DEBUG [M:0;c2a32e16c274:43451 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T04:29:26,599 DEBUG [M:0;c2a32e16c274:43451 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:29:26,599 INFO [M:0;c2a32e16c274:43451 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-20T04:29:26,606 INFO [regionserver/c2a32e16c274:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:29:26,625 DEBUG [M:0;c2a32e16c274:43451 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c4b144c219224bfb957bae9d28baeab0 is 82, key is hbase:meta,,1/info:regioninfo/1732076873307/Put/seqid=0 2024-11-20T04:29:26,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741855_1031 (size=5672) 2024-11-20T04:29:26,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741855_1031 (size=5672) 2024-11-20T04:29:26,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:29:26,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35243-0x10133a183e90001, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:29:26,666 INFO [RS:0;c2a32e16c274:35243 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T04:29:26,666 INFO [RS:0;c2a32e16c274:35243 {}] regionserver.HRegionServer(1031): Exiting; stopping=c2a32e16c274,35243,1732076871487; zookeeper connection closed. 
2024-11-20T04:29:26,667 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@29b229ae {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@29b229ae 2024-11-20T04:29:26,667 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T04:29:27,036 INFO [M:0;c2a32e16c274:43451 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c4b144c219224bfb957bae9d28baeab0 2024-11-20T04:29:27,063 DEBUG [M:0;c2a32e16c274:43451 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1261e7b57ddc4b12a5dde4b00d484353 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732076874189/Put/seqid=0 2024-11-20T04:29:27,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741856_1032 (size=6247) 2024-11-20T04:29:27,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741856_1032 (size=6247) 2024-11-20T04:29:27,071 INFO [M:0;c2a32e16c274:43451 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1261e7b57ddc4b12a5dde4b00d484353 2024-11-20T04:29:27,078 INFO [M:0;c2a32e16c274:43451 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1261e7b57ddc4b12a5dde4b00d484353 2024-11-20T04:29:27,102 DEBUG [M:0;c2a32e16c274:43451 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/37c6b69ee2db437fb85f5f27d8182e4f is 69, key is c2a32e16c274,35243,1732076871487/rs:state/1732076872509/Put/seqid=0 2024-11-20T04:29:27,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741857_1033 (size=5156) 2024-11-20T04:29:27,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741857_1033 (size=5156) 2024-11-20T04:29:27,115 INFO [M:0;c2a32e16c274:43451 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/37c6b69ee2db437fb85f5f27d8182e4f 2024-11-20T04:29:27,148 DEBUG [M:0;c2a32e16c274:43451 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4d1fce80529541efa497870c7316c3db is 52, key is load_balancer_on/state:d/1732076873663/Put/seqid=0 2024-11-20T04:29:27,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34821 is added to blk_1073741858_1034 (size=5056) 2024-11-20T04:29:27,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741858_1034 (size=5056) 2024-11-20T04:29:27,156 INFO [M:0;c2a32e16c274:43451 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4d1fce80529541efa497870c7316c3db 2024-11-20T04:29:27,165 DEBUG [M:0;c2a32e16c274:43451 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c4b144c219224bfb957bae9d28baeab0 as hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c4b144c219224bfb957bae9d28baeab0 2024-11-20T04:29:27,172 INFO [M:0;c2a32e16c274:43451 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c4b144c219224bfb957bae9d28baeab0, entries=8, sequenceid=59, filesize=5.5 K 2024-11-20T04:29:27,173 DEBUG [M:0;c2a32e16c274:43451 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1261e7b57ddc4b12a5dde4b00d484353 as hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1261e7b57ddc4b12a5dde4b00d484353 2024-11-20T04:29:27,180 INFO [M:0;c2a32e16c274:43451 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1261e7b57ddc4b12a5dde4b00d484353 2024-11-20T04:29:27,181 INFO [M:0;c2a32e16c274:43451 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1261e7b57ddc4b12a5dde4b00d484353, entries=6, sequenceid=59, filesize=6.1 K 2024-11-20T04:29:27,182 DEBUG [M:0;c2a32e16c274:43451 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/37c6b69ee2db437fb85f5f27d8182e4f as hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/37c6b69ee2db437fb85f5f27d8182e4f 2024-11-20T04:29:27,197 INFO [M:0;c2a32e16c274:43451 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/37c6b69ee2db437fb85f5f27d8182e4f, entries=1, sequenceid=59, filesize=5.0 K 2024-11-20T04:29:27,199 DEBUG [M:0;c2a32e16c274:43451 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4d1fce80529541efa497870c7316c3db as 
hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4d1fce80529541efa497870c7316c3db 2024-11-20T04:29:27,208 INFO [M:0;c2a32e16c274:43451 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4d1fce80529541efa497870c7316c3db, entries=1, sequenceid=59, filesize=4.9 K 2024-11-20T04:29:27,210 INFO [M:0;c2a32e16c274:43451 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 611ms, sequenceid=59, compaction requested=false 2024-11-20T04:29:27,217 INFO [M:0;c2a32e16c274:43451 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:29:27,217 DEBUG [M:0;c2a32e16c274:43451 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732076966598Disabling compacts and flushes for region at 1732076966598Disabling writes for close at 1732076966599 (+1 ms)Obtaining lock to block concurrent updates at 1732076966599Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732076966599Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732076966599Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732076966600 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732076966601 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732076966624 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732076966624Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732076967046 (+422 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732076967062 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732076967062Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732076967079 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732076967101 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732076967101Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732076967124 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732076967148 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732076967148Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15e44189: reopening flushed file at 1732076967163 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c20af55: reopening flushed file at 1732076967172 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10c7bcce: reopening flushed file at 1732076967181 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d0645e7: reopening flushed file at 1732076967198 (+17 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 611ms, sequenceid=59, compaction requested=false at 1732076967210 (+12 ms)Writing region close event to WAL at 1732076967217 (+7 ms)Closed at 1732076967217 2024-11-20T04:29:27,218 INFO 
[sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:27,218 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:27,219 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:27,219 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:27,219 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:27,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44703 is added to blk_1073741830_1006 (size=27973) 2024-11-20T04:29:27,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34821 is added to blk_1073741830_1006 (size=27973) 2024-11-20T04:29:27,225 INFO [M:0;c2a32e16c274:43451 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T04:29:27,225 INFO [M:0;c2a32e16c274:43451 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43451 2024-11-20T04:29:27,225 INFO [M:0;c2a32e16c274:43451 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T04:29:27,225 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T04:29:27,328 INFO [M:0;c2a32e16c274:43451 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T04:29:27,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:29:27,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43451-0x10133a183e90000, quorum=127.0.0.1:51126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:29:27,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:27,339 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:29:27,339 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:29:27,340 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:29:27,340 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/hadoop.log.dir/,STOPPED} 2024-11-20T04:29:27,344 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
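The "Region close journal" entries above (for ec0fb3462118dd12411f23256603f7b1, 1588230740 and 1595e783b53d99cd5eef43b6debb2682) pack the whole close sequence into one line of "<step> at <epoch millis>" segments with "(+N ms)" deltas between steps. The rough helper below, which is not part of HBase and only a reading aid for this log format, splits such a line back into per-step timings; the sample input is a few segments copied from the master store journal above.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalParserSketch {
  // Matches "<step text> at <13-digit epoch millis>" optionally followed by "(+<delta> ms)".
  private static final Pattern STEP =
      Pattern.compile("(.+?) at (\\d{13})(?:\\s*\\(\\+(\\d+) ms\\))?");

  public static void main(String[] args) {
    String journal = "Waiting for close lock at 1732076966598"
        + "Disabling compacts and flushes for region at 1732076966598"
        + "Disabling writes for close at 1732076966599 (+1 ms)"
        + "Closed at 1732076967217";

    Matcher m = STEP.matcher(journal);
    while (m.find()) {
      String delta = m.group(3) == null ? "0" : m.group(3);
      System.out.printf("%-45s t=%s (+%s ms)%n", m.group(1).trim(), m.group(2), delta);
    }
  }
}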
2024-11-20T04:29:27,344 WARN [BP-1006445382-172.17.0.2-1732076867909 heartbeating to localhost/127.0.0.1:35115 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:29:27,344 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:29:27,344 WARN [BP-1006445382-172.17.0.2-1732076867909 heartbeating to localhost/127.0.0.1:35115 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1006445382-172.17.0.2-1732076867909 (Datanode Uuid 0ba43b70-ff8f-46fb-8a05-5888746f784f) service to localhost/127.0.0.1:35115 2024-11-20T04:29:27,346 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/cluster_1f7e643f-dbdb-cf30-2517-66e95294bbae/data/data3/current/BP-1006445382-172.17.0.2-1732076867909 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:27,346 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/cluster_1f7e643f-dbdb-cf30-2517-66e95294bbae/data/data4/current/BP-1006445382-172.17.0.2-1732076867909 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:27,347 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:29:27,362 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:27,363 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:29:27,363 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:29:27,363 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:29:27,363 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/hadoop.log.dir/,STOPPED} 2024-11-20T04:29:27,365 WARN [BP-1006445382-172.17.0.2-1732076867909 heartbeating to localhost/127.0.0.1:35115 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:29:27,366 WARN [BP-1006445382-172.17.0.2-1732076867909 heartbeating to localhost/127.0.0.1:35115 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1006445382-172.17.0.2-1732076867909 (Datanode Uuid ddd74eb2-988d-44d4-9d97-d38c0552df74) service to localhost/127.0.0.1:35115 2024-11-20T04:29:27,366 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T04:29:27,366 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:29:27,366 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/cluster_1f7e643f-dbdb-cf30-2517-66e95294bbae/data/data1/current/BP-1006445382-172.17.0.2-1732076867909 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:27,367 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/cluster_1f7e643f-dbdb-cf30-2517-66e95294bbae/data/data2/current/BP-1006445382-172.17.0.2-1732076867909 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:27,367 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:29:27,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T04:29:27,381 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:29:27,381 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:29:27,381 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:29:27,381 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/hadoop.log.dir/,STOPPED} 2024-11-20T04:29:27,397 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T04:29:27,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T04:29:27,451 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35115 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35115 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/c2a32e16c274:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@375b938a java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35115 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially 
hanging thread: master/c2a32e16c274:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35115 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35115 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:35115 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 app//io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields.lpConsumerIndex(BaseMpscLinkedArrayQueue.java:113) app//io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueue.poll(BaseMpscLinkedArrayQueue.java:294) 
app//io.netty.util.internal.shaded.org.jctools.queues.MpscUnboundedArrayQueue.poll(MpscUnboundedArrayQueue.java:23) app//io.netty.util.concurrent.SingleThreadEventExecutor.pollTaskFrom(SingleThreadEventExecutor.java:215) app//io.netty.util.concurrent.SingleThreadEventExecutor.pollTask(SingleThreadEventExecutor.java:210) app//io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:459) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:569) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35115 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35115 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging 
thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/c2a32e16c274:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=405 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=205 (was 208), ProcessCount=11 (was 11), AvailableMemoryMB=8161 (was 7583) - AvailableMemoryMB LEAK? - 2024-11-20T04:29:27,461 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=405, MaxFileDescriptor=1048576, SystemLoadAverage=205, ProcessCount=11, AvailableMemoryMB=8160 2024-11-20T04:29:27,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T04:29:27,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/hadoop.log.dir so I do NOT create it in target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f 2024-11-20T04:29:27,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/99761540-35d4-0ac6-f4e5-acc1c2079f6b/hadoop.tmp.dir so I do NOT create it in target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f 2024-11-20T04:29:27,462 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/cluster_f8769a7a-d674-b732-1aa4-49a2fba607a2, deleteOnExit=true 2024-11-20T04:29:27,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T04:29:27,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/test.cache.data in system properties and HBase conf 2024-11-20T04:29:27,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T04:29:27,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/hadoop.log.dir in system properties and HBase conf 2024-11-20T04:29:27,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T04:29:27,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T04:29:27,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T04:29:27,464 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-20T04:29:27,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T04:29:27,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T04:29:27,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T04:29:27,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T04:29:27,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T04:29:27,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T04:29:27,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T04:29:27,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T04:29:27,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T04:29:27,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/nfs.dump.dir in system properties and HBase conf 2024-11-20T04:29:27,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/java.io.tmpdir in system properties and HBase conf 2024-11-20T04:29:27,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T04:29:27,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T04:29:27,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T04:29:27,480 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T04:29:27,591 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:29:27,600 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:29:27,618 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:29:27,618 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:29:27,618 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:29:27,619 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:29:27,620 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ae773f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:29:27,621 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@630e1a46{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:29:27,759 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4d195093{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/java.io.tmpdir/jetty-localhost-36009-hadoop-hdfs-3_4_1-tests_jar-_-any-2150517727449104562/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T04:29:27,759 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@476d81{HTTP/1.1, (http/1.1)}{localhost:36009} 2024-11-20T04:29:27,760 INFO [Time-limited test {}] server.Server(415): Started @101838ms 2024-11-20T04:29:27,777 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T04:29:27,879 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:29:27,883 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:29:27,884 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:29:27,884 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:29:27,884 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:29:27,885 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7247ee1d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:29:27,885 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3270c9ae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:29:28,004 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@15ba4d19{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/java.io.tmpdir/jetty-localhost-44045-hadoop-hdfs-3_4_1-tests_jar-_-any-11995753224148340755/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:28,005 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4d8874af{HTTP/1.1, (http/1.1)}{localhost:44045} 2024-11-20T04:29:28,005 INFO [Time-limited test {}] server.Server(415): Started @102083ms 2024-11-20T04:29:28,007 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T04:29:28,053 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:29:28,061 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:29:28,069 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:29:28,069 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:29:28,069 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T04:29:28,072 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64e5ce98{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:29:28,072 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c30f553{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:29:28,127 WARN [Thread-436 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/cluster_f8769a7a-d674-b732-1aa4-49a2fba607a2/data/data1/current/BP-2092897856-172.17.0.2-1732076967503/current, will proceed with Du for space computation calculation, 2024-11-20T04:29:28,128 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/cluster_f8769a7a-d674-b732-1aa4-49a2fba607a2/data/data2/current/BP-2092897856-172.17.0.2-1732076967503/current, will proceed with Du for space computation calculation, 2024-11-20T04:29:28,169 WARN [Thread-415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T04:29:28,176 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x33dd4068e1e9fa82 with lease ID 0xf19e7f42e0f294: Processing first storage report for DS-6ef1d625-d100-4fa2-8ed3-983e72bb42d6 from datanode DatanodeRegistration(127.0.0.1:43329, datanodeUuid=59363a67-7d98-4b8d-8cb3-6413b247044d, infoPort=36429, infoSecurePort=0, ipcPort=41969, storageInfo=lv=-57;cid=testClusterID;nsid=132380261;c=1732076967503) 2024-11-20T04:29:28,176 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x33dd4068e1e9fa82 with lease ID 0xf19e7f42e0f294: from storage DS-6ef1d625-d100-4fa2-8ed3-983e72bb42d6 node DatanodeRegistration(127.0.0.1:43329, datanodeUuid=59363a67-7d98-4b8d-8cb3-6413b247044d, infoPort=36429, infoSecurePort=0, ipcPort=41969, storageInfo=lv=-57;cid=testClusterID;nsid=132380261;c=1732076967503), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:29:28,176 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x33dd4068e1e9fa82 with lease ID 0xf19e7f42e0f294: Processing first storage report for DS-f0823915-0722-431a-b7a9-2c47ede49b6d from datanode DatanodeRegistration(127.0.0.1:43329, datanodeUuid=59363a67-7d98-4b8d-8cb3-6413b247044d, infoPort=36429, infoSecurePort=0, ipcPort=41969, storageInfo=lv=-57;cid=testClusterID;nsid=132380261;c=1732076967503) 2024-11-20T04:29:28,177 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x33dd4068e1e9fa82 with lease ID 0xf19e7f42e0f294: from storage DS-f0823915-0722-431a-b7a9-2c47ede49b6d node DatanodeRegistration(127.0.0.1:43329, datanodeUuid=59363a67-7d98-4b8d-8cb3-6413b247044d, infoPort=36429, infoSecurePort=0, ipcPort=41969, storageInfo=lv=-57;cid=testClusterID;nsid=132380261;c=1732076967503), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:29:28,202 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2a489d78{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/java.io.tmpdir/jetty-localhost-33197-hadoop-hdfs-3_4_1-tests_jar-_-any-7676093107186701016/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:28,202 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c67d530{HTTP/1.1, (http/1.1)}{localhost:33197} 2024-11-20T04:29:28,202 INFO [Time-limited test {}] server.Server(415): Started @102281ms 2024-11-20T04:29:28,205 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
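[Editor's note] The records above come from HBaseTestingUtil standing up an in-process test environment for this run: the test-data directory under hbase-server/target/test-data/<uuid> is created, directory properties such as dfs.journalnode.edits.dir and java.io.tmpdir are rewritten to live under it, and embedded Jetty servers for the NameNode and DataNodes are started. As a hedged illustration only (not taken from this log), a test that produces this kind of startup output typically drives the public HBase 3.x test utility like the sketch below; the class and method names are the documented HBaseTestingUtil API, but the concrete test class being run here is not identified in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // The utility owns the per-run test-data directory seen in the log and
        // redirects Hadoop/HBase directory settings into it.
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();

        // startMiniCluster() brings up a mini HDFS (the Jetty "Started Server"
        // lines above), a MiniZooKeeperCluster, and an HMaster/RegionServer pair.
        util.startMiniCluster();
        try {
          // ... test body would go here ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }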
2024-11-20T04:29:28,335 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/cluster_f8769a7a-d674-b732-1aa4-49a2fba607a2/data/data4/current/BP-2092897856-172.17.0.2-1732076967503/current, will proceed with Du for space computation calculation, 2024-11-20T04:29:28,335 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/cluster_f8769a7a-d674-b732-1aa4-49a2fba607a2/data/data3/current/BP-2092897856-172.17.0.2-1732076967503/current, will proceed with Du for space computation calculation, 2024-11-20T04:29:28,360 WARN [Thread-451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T04:29:28,363 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6d70e05f09db433 with lease ID 0xf19e7f42e0f295: Processing first storage report for DS-f80307fe-156e-4901-8d7d-1bd2a2f00592 from datanode DatanodeRegistration(127.0.0.1:36463, datanodeUuid=257ae809-456c-4bae-91e5-f84c2912eec5, infoPort=35049, infoSecurePort=0, ipcPort=44423, storageInfo=lv=-57;cid=testClusterID;nsid=132380261;c=1732076967503) 2024-11-20T04:29:28,363 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6d70e05f09db433 with lease ID 0xf19e7f42e0f295: from storage DS-f80307fe-156e-4901-8d7d-1bd2a2f00592 node DatanodeRegistration(127.0.0.1:36463, datanodeUuid=257ae809-456c-4bae-91e5-f84c2912eec5, infoPort=35049, infoSecurePort=0, ipcPort=44423, storageInfo=lv=-57;cid=testClusterID;nsid=132380261;c=1732076967503), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:29:28,364 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6d70e05f09db433 with lease ID 0xf19e7f42e0f295: Processing first storage report for DS-c1092a44-5823-42b3-af7e-fe096af6884a from datanode DatanodeRegistration(127.0.0.1:36463, datanodeUuid=257ae809-456c-4bae-91e5-f84c2912eec5, infoPort=35049, infoSecurePort=0, ipcPort=44423, storageInfo=lv=-57;cid=testClusterID;nsid=132380261;c=1732076967503) 2024-11-20T04:29:28,364 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6d70e05f09db433 with lease ID 0xf19e7f42e0f295: from storage DS-c1092a44-5823-42b3-af7e-fe096af6884a node DatanodeRegistration(127.0.0.1:36463, datanodeUuid=257ae809-456c-4bae-91e5-f84c2912eec5, infoPort=35049, infoSecurePort=0, ipcPort=44423, storageInfo=lv=-57;cid=testClusterID;nsid=132380261;c=1732076967503), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:29:28,366 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f 2024-11-20T04:29:28,377 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/cluster_f8769a7a-d674-b732-1aa4-49a2fba607a2/zookeeper_0, clientPort=62863, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/cluster_f8769a7a-d674-b732-1aa4-49a2fba607a2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/cluster_f8769a7a-d674-b732-1aa4-49a2fba607a2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T04:29:28,379 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62863 2024-11-20T04:29:28,379 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:28,381 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:28,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:29:28,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:29:28,800 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428 with version=8 2024-11-20T04:29:28,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/hbase-staging 2024-11-20T04:29:28,803 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:29:28,803 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:28,804 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:28,804 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:29:28,804 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:28,804 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:29:28,804 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T04:29:28,804 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:29:28,805 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34353 2024-11-20T04:29:28,807 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34353 connecting to ZooKeeper ensemble=127.0.0.1:62863 2024-11-20T04:29:28,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:343530x0, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:29:28,815 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34353-0x10133a304400000 connected 2024-11-20T04:29:28,861 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:28,863 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:28,866 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:29:28,866 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428, hbase.cluster.distributed=false 2024-11-20T04:29:28,869 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:29:28,870 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34353 2024-11-20T04:29:28,870 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34353 2024-11-20T04:29:28,870 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34353 2024-11-20T04:29:28,871 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34353 2024-11-20T04:29:28,871 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34353 2024-11-20T04:29:28,895 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:29:28,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:28,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:28,896 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:29:28,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:28,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:29:28,896 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T04:29:28,899 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:29:28,903 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42229 2024-11-20T04:29:28,905 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42229 connecting to ZooKeeper ensemble=127.0.0.1:62863 2024-11-20T04:29:28,906 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:28,910 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:28,932 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:422290x0, quorum=127.0.0.1:62863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:29:28,932 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T04:29:28,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:422290x0, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:29:28,948 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T04:29:28,950 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:422290x0, quorum=127.0.0.1:62863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T04:29:28,952 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:422290x0, quorum=127.0.0.1:62863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:29:28,962 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42229-0x10133a304400001 connected 2024-11-20T04:29:28,983 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42229 2024-11-20T04:29:28,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42229 2024-11-20T04:29:28,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42229 2024-11-20T04:29:28,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42229 2024-11-20T04:29:28,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42229 2024-11-20T04:29:29,035 DEBUG [M:0;c2a32e16c274:34353 
{}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c2a32e16c274:34353 2024-11-20T04:29:29,037 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c2a32e16c274,34353,1732076968803 2024-11-20T04:29:29,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:29:29,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:29:29,044 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c2a32e16c274,34353,1732076968803 2024-11-20T04:29:29,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T04:29:29,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:29,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:29,048 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T04:29:29,049 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c2a32e16c274,34353,1732076968803 from backup master directory 2024-11-20T04:29:29,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:29:29,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c2a32e16c274,34353,1732076968803 2024-11-20T04:29:29,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:29:29,060 WARN [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
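[Editor's note] The ZKWatcher/ZKUtil entries above show HBase setting watches on znodes such as /hbase/master and /hbase/backup-masters and reacting to NodeCreated and NodeChildrenChanged events while the master registers itself. As an illustrative sketch only, the same watch-and-react pattern with the plain Apache ZooKeeper client (not HBase's internal ZKWatcher class) looks like the following; the connect string 127.0.0.1:62863 is taken from the log, everything else (class name, timeouts, the sleep) is an assumption for the sake of a runnable toy.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Session watcher: receives connection-state changes and events for watched znodes.
        Watcher watcher = (WatchedEvent event) -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          // NodeCreated / NodeChildrenChanged events like the ones in the log arrive here.
          System.out.println("event type=" + event.getType() + " path=" + event.getPath());
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62863", 30000, watcher);
        connected.await();

        // Set a watch on a znode that may not exist yet, mirroring the log line
        // "Set watcher on znode that does not yet exist, /hbase/master".
        zk.exists("/hbase/master", true);

        Thread.sleep(5000); // give the toy example a moment to print any events
        zk.close();
      }
    }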
2024-11-20T04:29:29,060 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c2a32e16c274,34353,1732076968803 2024-11-20T04:29:29,077 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/hbase.id] with ID: f1a2ca4e-0263-4d5b-b2ea-ca972f555a8d 2024-11-20T04:29:29,077 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/.tmp/hbase.id 2024-11-20T04:29:29,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741826_1002 (size=42) 2024-11-20T04:29:29,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741826_1002 (size=42) 2024-11-20T04:29:29,096 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/.tmp/hbase.id]:[hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/hbase.id] 2024-11-20T04:29:29,124 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:29,124 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T04:29:29,127 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
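[Editor's note] The FSUtils entries above show the cluster ID file (hbase.id) being written to a .tmp location first and then moved to its final path, a standard write-then-rename publish pattern on HDFS. A minimal sketch of that pattern with the public Hadoop FileSystem API follows; the root path is a placeholder rather than the one from this run, and writing the raw UUID string is a simplification of whatever format HBase actually uses for the file contents.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRenameSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder root; the log uses hdfs://localhost:46841/user/jenkins/test-data/...
        Path root = new Path("hdfs://localhost:8020/tmp/example");
        FileSystem fs = root.getFileSystem(conf);

        Path tmp = new Path(root, ".tmp/hbase.id");
        Path dst = new Path(root, "hbase.id");

        // Write the content to the temporary file first.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("f1a2ca4e-0263-4d5b-b2ea-ca972f555a8d".getBytes(StandardCharsets.UTF_8));
        }
        // Then rename into place so readers never observe a half-written file.
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }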
2024-11-20T04:29:29,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:29,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:29,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:29:29,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:29:29,157 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T04:29:29,158 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T04:29:29,159 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:29:29,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741828_1004 (size=1189) 2024-11-20T04:29:29,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741828_1004 (size=1189) 2024-11-20T04:29:29,249 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store 2024-11-20T04:29:29,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:29:29,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:29:29,282 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:29:29,282 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T04:29:29,282 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:29:29,282 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:29:29,282 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T04:29:29,282 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:29:29,282 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
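[Editor's note] The MasterRegion entries above print the full descriptor of the local 'master:store' table, with column families info, proc, rs and state and per-family settings such as VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE. That region is created by internal master code, not through the client API, but purely as an illustration of what those printed attributes correspond to, a descriptor with the same 'info' family settings can be assembled with the public builders as below; the table name is hypothetical and this is not how HBase constructs master:store internally.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family as printed in the log: VERSIONS=3,
        // BLOOMFILTER=ROWCOL, IN_MEMORY=true, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .build();

        // Hypothetical namespace/table name for illustration only.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example", "store"))
            .setColumnFamily(info)
            .build();

        System.out.println(td);
      }
    }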
2024-11-20T04:29:29,283 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732076969282Disabling compacts and flushes for region at 1732076969282Disabling writes for close at 1732076969282Writing region close event to WAL at 1732076969282Closed at 1732076969282 2024-11-20T04:29:29,284 WARN [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/.initializing 2024-11-20T04:29:29,285 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/WALs/c2a32e16c274,34353,1732076968803 2024-11-20T04:29:29,297 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C34353%2C1732076968803, suffix=, logDir=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/WALs/c2a32e16c274,34353,1732076968803, archiveDir=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/oldWALs, maxLogs=10 2024-11-20T04:29:29,297 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C34353%2C1732076968803.1732076969297 2024-11-20T04:29:29,316 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/WALs/c2a32e16c274,34353,1732076968803/c2a32e16c274%2C34353%2C1732076968803.1732076969297 2024-11-20T04:29:29,321 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36429:36429),(127.0.0.1/127.0.0.1:35049:35049)] 2024-11-20T04:29:29,322 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:29:29,322 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:29:29,322 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:29,322 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:29,327 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:29,329 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T04:29:29,329 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:29,330 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:29,331 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:29,333 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T04:29:29,333 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:29,334 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:29:29,334 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:29,345 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T04:29:29,346 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:29,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:29:29,347 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:29,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T04:29:29,351 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:29,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:29:29,352 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:29,358 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:29,360 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:29,363 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:29,363 DEBUG [master/c2a32e16c274:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:29,365 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T04:29:29,367 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:29,372 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:29:29,372 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=858746, jitterRate=0.09195245802402496}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T04:29:29,374 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732076969323Initializing all the Stores at 1732076969324 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076969324Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076969326 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076969326Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076969326Cleaning up temporary data from old regions at 1732076969363 (+37 ms)Region opened successfully at 1732076969374 (+11 ms) 2024-11-20T04:29:29,375 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T04:29:29,391 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70d1b217, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:29:29,393 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T04:29:29,393 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T04:29:29,393 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T04:29:29,393 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T04:29:29,394 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T04:29:29,395 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T04:29:29,395 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T04:29:29,401 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T04:29:29,402 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T04:29:29,403 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T04:29:29,404 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T04:29:29,404 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T04:29:29,412 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T04:29:29,413 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T04:29:29,416 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T04:29:29,418 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T04:29:29,419 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T04:29:29,420 DEBUG 
[master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T04:29:29,423 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T04:29:29,424 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T04:29:29,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T04:29:29,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T04:29:29,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:29,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:29,429 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c2a32e16c274,34353,1732076968803, sessionid=0x10133a304400000, setting cluster-up flag (Was=false) 2024-11-20T04:29:29,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:29,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:29,441 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T04:29:29,443 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2a32e16c274,34353,1732076968803 2024-11-20T04:29:29,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:29,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:29,454 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T04:29:29,457 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2a32e16c274,34353,1732076968803 2024-11-20T04:29:29,459 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T04:29:29,461 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T04:29:29,462 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T04:29:29,462 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T04:29:29,462 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c2a32e16c274,34353,1732076968803 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T04:29:29,465 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:29:29,465 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:29:29,465 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:29:29,465 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:29:29,466 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c2a32e16c274:0, corePoolSize=10, maxPoolSize=10 2024-11-20T04:29:29,466 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:29,466 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:29:29,466 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c2a32e16c274:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T04:29:29,475 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:29:29,475 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T04:29:29,476 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732076999476 2024-11-20T04:29:29,476 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T04:29:29,476 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T04:29:29,476 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T04:29:29,476 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T04:29:29,476 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T04:29:29,476 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T04:29:29,476 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:29,477 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T04:29:29,477 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T04:29:29,477 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T04:29:29,477 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T04:29:29,477 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T04:29:29,478 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732076969478,5,FailOnTimeoutGroup] 2024-11-20T04:29:29,478 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732076969478,5,FailOnTimeoutGroup] 2024-11-20T04:29:29,478 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:29,478 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T04:29:29,478 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:29,478 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:29,479 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:29,479 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T04:29:29,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:29:29,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:29:29,490 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T04:29:29,490 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428 2024-11-20T04:29:29,511 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(746): ClusterId : f1a2ca4e-0263-4d5b-b2ea-ca972f555a8d 2024-11-20T04:29:29,512 DEBUG [RS:0;c2a32e16c274:42229 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T04:29:29,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741832_1008 (size=32) 2024-11-20T04:29:29,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741832_1008 (size=32) 2024-11-20T04:29:29,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:29:29,516 DEBUG [RS:0;c2a32e16c274:42229 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T04:29:29,516 DEBUG [RS:0;c2a32e16c274:42229 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T04:29:29,519 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T04:29:29,521 DEBUG [RS:0;c2a32e16c274:42229 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T04:29:29,521 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 
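The descriptor printed above spells out the column-family attributes HBase uses for hbase:meta (ROWCOL bloom filter, ROW_INDEX_V1 block encoding, in-memory, 8 KB block size). A minimal sketch of declaring a user table with the same family attributes through the public client API follows; the class name, the table name 'example_table', and the single 'info' family are illustrative, not taken from this run.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExampleTable {
      public static void main(String[] args) throws Exception {
        // Column family mirroring the attributes printed in the meta descriptor above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8192)                              // 8 KB, as in the descriptor dump
            .build();
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))  // hypothetical table name
            .setColumnFamily(info)
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(table);
        }
      }
    }

In the test itself the equivalent descriptor for hbase:meta is written internally by FSTableDescriptors (as logged above) rather than through Admin.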
2024-11-20T04:29:29,521 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:29,521 DEBUG [RS:0;c2a32e16c274:42229 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60a62078, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:29:29,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:29,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T04:29:29,525 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T04:29:29,525 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:29,528 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:29,528 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T04:29:29,543 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T04:29:29,543 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:29,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:29,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T04:29:29,547 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T04:29:29,547 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:29,547 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:29,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T04:29:29,549 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/data/hbase/meta/1588230740 2024-11-20T04:29:29,550 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/data/hbase/meta/1588230740 2024-11-20T04:29:29,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T04:29:29,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T04:29:29,553 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
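Two tunables are referenced in the surrounding entries: hbase.regions.recovery.store.file.ref.count (reported disabled because no threshold > 0 is configured) and hbase.hregion.percolumnfamilyflush.size.lower.bound (unset here, so the flush policy falls back to the memstore flush size divided by the number of families, 16.0 M). A sketch of setting them on an HBase Configuration; the numeric values are illustrative only, and in practice these normally live in hbase-site.xml rather than code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndRecoveryTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Threshold of store-file references above which regions are reopened;
        // the run above leaves it <= 0, so the feature stays disabled.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);      // illustrative value
        // Lower bound for per-column-family flushes; unset in this run.
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            16L * 1024 * 1024);                                               // illustrative value
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }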
2024-11-20T04:29:29,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T04:29:29,562 DEBUG [RS:0;c2a32e16c274:42229 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c2a32e16c274:42229 2024-11-20T04:29:29,562 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T04:29:29,562 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T04:29:29,562 DEBUG [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-20T04:29:29,564 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(2659): reportForDuty to master=c2a32e16c274,34353,1732076968803 with port=42229, startcode=1732076968894 2024-11-20T04:29:29,564 DEBUG [RS:0;c2a32e16c274:42229 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T04:29:29,565 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:29:29,565 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=715582, jitterRate=-0.09009112417697906}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T04:29:29,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732076969516Initializing all the Stores at 1732076969518 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076969518Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076969518Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076969518Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076969518Cleaning up temporary data from old regions at 1732076969552 (+34 ms)Region opened successfully at 1732076969567 (+15 ms) 2024-11-20T04:29:29,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, 
disabling compactions & flushes 2024-11-20T04:29:29,569 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T04:29:29,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T04:29:29,570 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52547, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T04:29:29,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T04:29:29,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T04:29:29,570 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34353 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c2a32e16c274,42229,1732076968894 2024-11-20T04:29:29,571 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34353 {}] master.ServerManager(517): Registering regionserver=c2a32e16c274,42229,1732076968894 2024-11-20T04:29:29,571 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T04:29:29,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732076969569Disabling compacts and flushes for region at 1732076969569Disabling writes for close at 1732076969570 (+1 ms)Writing region close event to WAL at 1732076969571 (+1 ms)Closed at 1732076969571 2024-11-20T04:29:29,575 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:29:29,575 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T04:29:29,575 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T04:29:29,576 DEBUG [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428 2024-11-20T04:29:29,576 DEBUG [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46841 2024-11-20T04:29:29,576 DEBUG [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T04:29:29,578 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T04:29:29,579 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T04:29:29,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:29:29,584 DEBUG 
[RS:0;c2a32e16c274:42229 {}] zookeeper.ZKUtil(111): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c2a32e16c274,42229,1732076968894 2024-11-20T04:29:29,584 WARN [RS:0;c2a32e16c274:42229 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T04:29:29,584 INFO [RS:0;c2a32e16c274:42229 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:29:29,584 DEBUG [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/WALs/c2a32e16c274,42229,1732076968894 2024-11-20T04:29:29,585 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c2a32e16c274,42229,1732076968894] 2024-11-20T04:29:29,607 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T04:29:29,612 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T04:29:29,617 INFO [RS:0;c2a32e16c274:42229 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T04:29:29,617 INFO [RS:0;c2a32e16c274:42229 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:29,619 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T04:29:29,621 INFO [RS:0;c2a32e16c274:42229 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T04:29:29,621 INFO [RS:0;c2a32e16c274:42229 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
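The watcher traffic above is the master tracking regionserver liveness through ephemeral znodes under /hbase/rs. A stripped-down sketch of the same pattern with the plain ZooKeeper client rather than HBase's internal ZKWatcher; the quorum address is the one from this log, and the 30-second session timeout is an arbitrary choice.

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsTrackerSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62863", 30_000, event -> {});
        Watcher childWatcher = new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            // Fires once per change; re-register by calling getChildren again.
            System.out.println("Event on " + event.getPath() + ": " + event.getType());
          }
        };
        // Watch the regionserver registry; each live RS keeps an ephemeral child here.
        List<String> servers = zk.getChildren("/hbase/rs", childWatcher);
        System.out.println("Live regionservers: " + servers);
        zk.close();
      }
    }

HBase wraps this pattern in RecoverableZooKeeper and ZKWatcher (both visible in the log) so that retries and watch handling are managed for it.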
2024-11-20T04:29:29,621 DEBUG [RS:0;c2a32e16c274:42229 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:29,621 DEBUG [RS:0;c2a32e16c274:42229 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:29,622 DEBUG [RS:0;c2a32e16c274:42229 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:29,622 DEBUG [RS:0;c2a32e16c274:42229 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:29,622 DEBUG [RS:0;c2a32e16c274:42229 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:29,622 DEBUG [RS:0;c2a32e16c274:42229 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:29:29,622 DEBUG [RS:0;c2a32e16c274:42229 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:29,622 DEBUG [RS:0;c2a32e16c274:42229 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:29,622 DEBUG [RS:0;c2a32e16c274:42229 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:29,622 DEBUG [RS:0;c2a32e16c274:42229 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:29,622 DEBUG [RS:0;c2a32e16c274:42229 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:29,622 DEBUG [RS:0;c2a32e16c274:42229 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:29,622 DEBUG [RS:0;c2a32e16c274:42229 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:29:29,623 DEBUG [RS:0;c2a32e16c274:42229 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:29:29,623 INFO [RS:0;c2a32e16c274:42229 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:29,623 INFO [RS:0;c2a32e16c274:42229 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:29,623 INFO [RS:0;c2a32e16c274:42229 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:29,624 INFO [RS:0;c2a32e16c274:42229 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
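The executor list above (fixed core/max pool sizes per event type) plus the chores that follow (CompactionChecker and MemstoreFlusherChore every 1000 ms, ExecutorStatusChore every minute) amount to a set of bounded thread pools and periodic tasks. A plain-JDK analogy of that shape, not the HBase ExecutorService/ChoreService classes themselves:

    import java.util.concurrent.Executors;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class PoolsAndChoresAnalogy {
      public static void main(String[] args) throws InterruptedException {
        // Analogue of an event-type pool such as RS_OPEN_REGION (corePoolSize=1, maxPoolSize=1).
        ThreadPoolExecutor openRegionPool =
            new ThreadPoolExecutor(1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        // Analogue of a ScheduledChore such as CompactionChecker (period=1000 ms).
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
        chores.scheduleAtFixedRate(
            () -> System.out.println("compaction check tick"), 0, 1000, TimeUnit.MILLISECONDS);
        openRegionPool.execute(() -> System.out.println("open region task"));
        TimeUnit.SECONDS.sleep(3);
        chores.shutdownNow();
        openRegionPool.shutdownNow();
      }
    }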
2024-11-20T04:29:29,624 INFO [RS:0;c2a32e16c274:42229 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:29,624 INFO [RS:0;c2a32e16c274:42229 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,42229,1732076968894-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T04:29:29,647 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T04:29:29,647 INFO [RS:0;c2a32e16c274:42229 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,42229,1732076968894-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:29,647 INFO [RS:0;c2a32e16c274:42229 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:29,647 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.Replication(171): c2a32e16c274,42229,1732076968894 started 2024-11-20T04:29:29,664 INFO [RS:0;c2a32e16c274:42229 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:29,665 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(1482): Serving as c2a32e16c274,42229,1732076968894, RpcServer on c2a32e16c274/172.17.0.2:42229, sessionid=0x10133a304400001 2024-11-20T04:29:29,665 DEBUG [RS:0;c2a32e16c274:42229 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T04:29:29,665 DEBUG [RS:0;c2a32e16c274:42229 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c2a32e16c274,42229,1732076968894 2024-11-20T04:29:29,665 DEBUG [RS:0;c2a32e16c274:42229 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,42229,1732076968894' 2024-11-20T04:29:29,665 DEBUG [RS:0;c2a32e16c274:42229 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T04:29:29,667 DEBUG [RS:0;c2a32e16c274:42229 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T04:29:29,667 DEBUG [RS:0;c2a32e16c274:42229 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T04:29:29,668 DEBUG [RS:0;c2a32e16c274:42229 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T04:29:29,668 DEBUG [RS:0;c2a32e16c274:42229 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c2a32e16c274,42229,1732076968894 2024-11-20T04:29:29,668 DEBUG [RS:0;c2a32e16c274:42229 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,42229,1732076968894' 2024-11-20T04:29:29,668 DEBUG [RS:0;c2a32e16c274:42229 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T04:29:29,668 DEBUG [RS:0;c2a32e16c274:42229 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T04:29:29,669 DEBUG [RS:0;c2a32e16c274:42229 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T04:29:29,669 INFO [RS:0;c2a32e16c274:42229 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T04:29:29,669 INFO [RS:0;c2a32e16c274:42229 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-20T04:29:29,731 WARN [c2a32e16c274:34353 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-20T04:29:29,772 INFO [RS:0;c2a32e16c274:42229 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C42229%2C1732076968894, suffix=, logDir=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/WALs/c2a32e16c274,42229,1732076968894, archiveDir=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/oldWALs, maxLogs=32 2024-11-20T04:29:29,774 INFO [RS:0;c2a32e16c274:42229 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C42229%2C1732076968894.1732076969774 2024-11-20T04:29:29,800 INFO [RS:0;c2a32e16c274:42229 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/WALs/c2a32e16c274,42229,1732076968894/c2a32e16c274%2C42229%2C1732076968894.1732076969774 2024-11-20T04:29:29,810 DEBUG [RS:0;c2a32e16c274:42229 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35049:35049),(127.0.0.1/127.0.0.1:36429:36429)] 2024-11-20T04:29:29,981 DEBUG [c2a32e16c274:34353 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T04:29:29,982 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c2a32e16c274,42229,1732076968894 2024-11-20T04:29:29,984 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2a32e16c274,42229,1732076968894, state=OPENING 2024-11-20T04:29:29,986 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T04:29:29,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:29,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:29,988 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:29:29,988 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T04:29:29,988 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:29:29,988 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c2a32e16c274,42229,1732076968894}] 2024-11-20T04:29:30,142 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T04:29:30,145 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59349, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T04:29:30,149 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T04:29:30,150 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:29:30,152 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C42229%2C1732076968894.meta, suffix=.meta, logDir=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/WALs/c2a32e16c274,42229,1732076968894, archiveDir=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/oldWALs, maxLogs=32 2024-11-20T04:29:30,154 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C42229%2C1732076968894.meta.1732076970153.meta 2024-11-20T04:29:30,160 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/WALs/c2a32e16c274,42229,1732076968894/c2a32e16c274%2C42229%2C1732076968894.meta.1732076970153.meta 2024-11-20T04:29:30,161 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36429:36429),(127.0.0.1/127.0.0.1:35049:35049)] 2024-11-20T04:29:30,161 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:29:30,162 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T04:29:30,162 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T04:29:30,162 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
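The FSHLog lines above report the WAL writer parameters for this run: blocksize=256 MB, rollsize=128 MB (block size times the roll multiplier) and maxLogs=32. A sketch of the corresponding Configuration keys; hbase.regionserver.maxlogs matches the value seen here, while hbase.regionserver.hlog.blocksize and hbase.regionserver.logroll.multiplier are given from memory and should be checked against the documentation for this HBase version before relying on them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Cap on un-archived WAL files per regionserver (maxLogs=32 in the log above).
        conf.setInt("hbase.regionserver.maxlogs", 32);
        // Assumed key names for the 256 MB block size and the 0.5 multiplier that
        // yield the 128 MB rollsize reported above; verify before use.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
      }
    }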
2024-11-20T04:29:30,162 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T04:29:30,162 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:29:30,162 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T04:29:30,162 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T04:29:30,164 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T04:29:30,165 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T04:29:30,165 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:30,166 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:30,166 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T04:29:30,167 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T04:29:30,167 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:30,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:30,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T04:29:30,168 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T04:29:30,168 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:30,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:30,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T04:29:30,170 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T04:29:30,170 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:30,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-20T04:29:30,170 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T04:29:30,172 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/data/hbase/meta/1588230740 2024-11-20T04:29:30,173 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/data/hbase/meta/1588230740 2024-11-20T04:29:30,175 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T04:29:30,175 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T04:29:30,176 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T04:29:30,178 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T04:29:30,179 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=768889, jitterRate=-0.022307604551315308}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T04:29:30,179 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T04:29:30,180 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732076970162Writing region info on filesystem at 1732076970162Initializing all the Stores at 1732076970163 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076970164 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076970164Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076970164Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076970164Cleaning up temporary data from old regions at 1732076970175 (+11 ms)Running coprocessor post-open hooks at 1732076970179 (+4 ms)Region opened successfully at 1732076970180 (+1 ms) 2024-11-20T04:29:30,182 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732076970142 2024-11-20T04:29:30,185 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T04:29:30,185 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T04:29:30,186 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c2a32e16c274,42229,1732076968894 2024-11-20T04:29:30,188 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2a32e16c274,42229,1732076968894, state=OPEN 2024-11-20T04:29:30,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T04:29:30,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T04:29:30,194 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:29:30,194 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:29:30,194 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c2a32e16c274,42229,1732076968894 2024-11-20T04:29:30,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T04:29:30,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c2a32e16c274,42229,1732076968894 in 206 msec 2024-11-20T04:29:30,202 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T04:29:30,202 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 623 msec 2024-11-20T04:29:30,203 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:29:30,203 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T04:29:30,205 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T04:29:30,205 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2a32e16c274,42229,1732076968894, seqNum=-1] 2024-11-20T04:29:30,205 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T04:29:30,207 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55879, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T04:29:30,215 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 753 msec 2024-11-20T04:29:30,215 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732076970215, completionTime=-1 2024-11-20T04:29:30,215 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T04:29:30,215 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T04:29:30,217 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T04:29:30,218 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732077030217 2024-11-20T04:29:30,218 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732077090218 2024-11-20T04:29:30,218 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-20T04:29:30,218 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,34353,1732076968803-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:30,218 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,34353,1732076968803-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:30,218 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,34353,1732076968803-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:30,218 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c2a32e16c274:34353, period=300000, unit=MILLISECONDS is enabled. 
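Once InitMetaProcedure has created the 'default' and 'hbase' namespaces and hbase:meta is OPEN on the lone regionserver, the same facts are visible from an ordinary client. A small sketch using the public API, assuming the client's default configuration points at this cluster:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ClusterPeekSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             RegionLocator meta = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // The two namespaces created by InitMetaProcedure above.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
          }
          // Same question the PEWorker answers through the registry: where does hbase:meta live?
          System.out.println("meta at: "
              + meta.getRegionLocation(HConstants.EMPTY_START_ROW).getServerName());
        }
      }
    }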
2024-11-20T04:29:30,218 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:30,218 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:30,220 DEBUG [master/c2a32e16c274:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T04:29:30,224 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.163sec 2024-11-20T04:29:30,224 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T04:29:30,224 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T04:29:30,224 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T04:29:30,224 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T04:29:30,224 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T04:29:30,224 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,34353,1732076968803-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T04:29:30,224 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,34353,1732076968803-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T04:29:30,228 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T04:29:30,228 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T04:29:30,228 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,34353,1732076968803-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T04:29:30,311 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60dafc4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:29:30,311 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c2a32e16c274,34353,-1 for getting cluster id 2024-11-20T04:29:30,312 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T04:29:30,314 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f1a2ca4e-0263-4d5b-b2ea-ca972f555a8d' 2024-11-20T04:29:30,314 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T04:29:30,315 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f1a2ca4e-0263-4d5b-b2ea-ca972f555a8d" 2024-11-20T04:29:30,315 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ef451c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:29:30,315 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c2a32e16c274,34353,-1] 2024-11-20T04:29:30,315 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T04:29:30,316 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:29:30,318 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49436, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T04:29:30,319 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a89c49a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:29:30,320 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T04:29:30,321 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2a32e16c274,42229,1732076968894, seqNum=-1] 2024-11-20T04:29:30,322 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T04:29:30,324 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36814, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T04:29:30,327 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c2a32e16c274,34353,1732076968803 2024-11-20T04:29:30,327 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:30,331 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T04:29:30,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T04:29:30,331 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T04:29:30,332 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:29:30,332 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:29:30,333 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:29:30,333 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T04:29:30,333 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T04:29:30,333 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=292436729, stopped=false 2024-11-20T04:29:30,333 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c2a32e16c274,34353,1732076968803 2024-11-20T04:29:30,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:29:30,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:30,335 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T04:29:30,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:29:30,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:30,336 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T04:29:30,336 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:29:30,336 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:29:30,336 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:29:30,336 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c2a32e16c274,42229,1732076968894' ***** 2024-11-20T04:29:30,337 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T04:29:30,337 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:29:30,337 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T04:29:30,337 INFO [RS:0;c2a32e16c274:42229 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T04:29:30,337 INFO [RS:0;c2a32e16c274:42229 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T04:29:30,337 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(959): stopping server c2a32e16c274,42229,1732076968894 2024-11-20T04:29:30,337 INFO [RS:0;c2a32e16c274:42229 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:29:30,337 INFO [RS:0;c2a32e16c274:42229 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c2a32e16c274:42229. 2024-11-20T04:29:30,338 DEBUG [RS:0;c2a32e16c274:42229 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:29:30,338 DEBUG [RS:0;c2a32e16c274:42229 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:29:30,338 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T04:29:30,338 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-20T04:29:30,338 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T04:29:30,338 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T04:29:30,338 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T04:29:30,338 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-20T04:29:30,338 DEBUG [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-20T04:29:30,338 DEBUG [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-20T04:29:30,339 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T04:29:30,339 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T04:29:30,339 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T04:29:30,339 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T04:29:30,339 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T04:29:30,339 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-20T04:29:30,360 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/data/hbase/meta/1588230740/.tmp/ns/154204bdd23f4a6c8c2bdc3df3eb8874 is 43, key is default/ns:d/1732076970208/Put/seqid=0 2024-11-20T04:29:30,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741835_1011 (size=5153) 2024-11-20T04:29:30,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741835_1011 (size=5153) 2024-11-20T04:29:30,367 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/data/hbase/meta/1588230740/.tmp/ns/154204bdd23f4a6c8c2bdc3df3eb8874 2024-11-20T04:29:30,377 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/data/hbase/meta/1588230740/.tmp/ns/154204bdd23f4a6c8c2bdc3df3eb8874 as hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/data/hbase/meta/1588230740/ns/154204bdd23f4a6c8c2bdc3df3eb8874 2024-11-20T04:29:30,386 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/data/hbase/meta/1588230740/ns/154204bdd23f4a6c8c2bdc3df3eb8874, entries=2, sequenceid=6, filesize=5.0 K 2024-11-20T04:29:30,387 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 48ms, sequenceid=6, compaction requested=false 2024-11-20T04:29:30,387 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T04:29:30,394 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T04:29:30,395 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T04:29:30,395 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T04:29:30,396 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732076970339Running coprocessor pre-close hooks at 1732076970339Disabling compacts and flushes for region at 1732076970339Disabling writes for close at 1732076970339Obtaining lock to block concurrent updates at 1732076970339Preparing flush snapshotting stores in 1588230740 at 1732076970339Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732076970340 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732076970341 (+1 ms)Flushing 1588230740/ns: creating writer at 1732076970341Flushing 1588230740/ns: appending metadata at 1732076970359 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1732076970359Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d0e77e6: reopening flushed file at 1732076970376 (+17 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 48ms, sequenceid=6, compaction requested=false at 1732076970387 (+11 ms)Writing region close event to WAL at 1732076970389 (+2 ms)Running coprocessor post-close hooks at 1732076970395 (+6 ms)Closed at 1732076970395 2024-11-20T04:29:30,396 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T04:29:30,539 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(976): stopping server c2a32e16c274,42229,1732076968894; all regions closed. 
2024-11-20T04:29:30,539 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,540 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,540 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,540 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,540 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741834_1010 (size=1152) 2024-11-20T04:29:30,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741834_1010 (size=1152) 2024-11-20T04:29:30,548 DEBUG [RS:0;c2a32e16c274:42229 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/oldWALs 2024-11-20T04:29:30,548 INFO [RS:0;c2a32e16c274:42229 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C42229%2C1732076968894.meta:.meta(num 1732076970153) 2024-11-20T04:29:30,548 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,548 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,549 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,549 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,549 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741833_1009 (size=93) 2024-11-20T04:29:30,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741833_1009 (size=93) 2024-11-20T04:29:30,555 DEBUG [RS:0;c2a32e16c274:42229 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/oldWALs 2024-11-20T04:29:30,555 INFO [RS:0;c2a32e16c274:42229 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C42229%2C1732076968894:(num 1732076969774) 2024-11-20T04:29:30,556 DEBUG [RS:0;c2a32e16c274:42229 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:29:30,556 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:29:30,556 INFO [RS:0;c2a32e16c274:42229 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:29:30,556 INFO [RS:0;c2a32e16c274:42229 {}] hbase.ChoreService(370): Chore service for: regionserver/c2a32e16c274:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T04:29:30,556 INFO [RS:0;c2a32e16c274:42229 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:29:30,556 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T04:29:30,557 INFO [RS:0;c2a32e16c274:42229 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42229 2024-11-20T04:29:30,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:29:30,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c2a32e16c274,42229,1732076968894 2024-11-20T04:29:30,560 INFO [RS:0;c2a32e16c274:42229 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T04:29:30,562 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c2a32e16c274,42229,1732076968894] 2024-11-20T04:29:30,563 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c2a32e16c274,42229,1732076968894 already deleted, retry=false 2024-11-20T04:29:30,564 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c2a32e16c274,42229,1732076968894 expired; onlineServers=0 2024-11-20T04:29:30,564 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c2a32e16c274,34353,1732076968803' ***** 2024-11-20T04:29:30,564 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T04:29:30,564 INFO [M:0;c2a32e16c274:34353 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:29:30,564 INFO [M:0;c2a32e16c274:34353 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:29:30,564 DEBUG [M:0;c2a32e16c274:34353 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T04:29:30,564 DEBUG [M:0;c2a32e16c274:34353 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T04:29:30,564 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T04:29:30,564 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732076969478 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732076969478,5,FailOnTimeoutGroup] 2024-11-20T04:29:30,564 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732076969478 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732076969478,5,FailOnTimeoutGroup] 2024-11-20T04:29:30,564 INFO [M:0;c2a32e16c274:34353 {}] hbase.ChoreService(370): Chore service for: master/c2a32e16c274:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T04:29:30,565 INFO [M:0;c2a32e16c274:34353 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:29:30,565 DEBUG [M:0;c2a32e16c274:34353 {}] master.HMaster(1795): Stopping service threads 2024-11-20T04:29:30,565 INFO [M:0;c2a32e16c274:34353 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T04:29:30,565 INFO [M:0;c2a32e16c274:34353 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T04:29:30,565 INFO [M:0;c2a32e16c274:34353 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T04:29:30,565 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T04:29:30,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T04:29:30,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:30,569 DEBUG [M:0;c2a32e16c274:34353 {}] zookeeper.ZKUtil(347): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T04:29:30,569 WARN [M:0;c2a32e16c274:34353 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T04:29:30,570 INFO [M:0;c2a32e16c274:34353 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/.lastflushedseqids 2024-11-20T04:29:30,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741836_1012 (size=99) 2024-11-20T04:29:30,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741836_1012 (size=99) 2024-11-20T04:29:30,578 INFO [M:0;c2a32e16c274:34353 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T04:29:30,578 INFO [M:0;c2a32e16c274:34353 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T04:29:30,579 DEBUG [M:0;c2a32e16c274:34353 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T04:29:30,579 INFO [M:0;c2a32e16c274:34353 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:29:30,579 DEBUG [M:0;c2a32e16c274:34353 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:29:30,579 DEBUG [M:0;c2a32e16c274:34353 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T04:29:30,579 DEBUG [M:0;c2a32e16c274:34353 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:29:30,579 INFO [M:0;c2a32e16c274:34353 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-20T04:29:30,598 DEBUG [M:0;c2a32e16c274:34353 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/489e3912ef8743d6bafdbf73674af383 is 82, key is hbase:meta,,1/info:regioninfo/1732076970186/Put/seqid=0 2024-11-20T04:29:30,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741837_1013 (size=5672) 2024-11-20T04:29:30,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741837_1013 (size=5672) 2024-11-20T04:29:30,604 INFO [M:0;c2a32e16c274:34353 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/489e3912ef8743d6bafdbf73674af383 2024-11-20T04:29:30,638 DEBUG [M:0;c2a32e16c274:34353 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a717859af84045ca912bf87ff0c01acf is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732076970214/Put/seqid=0 2024-11-20T04:29:30,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741838_1014 (size=5275) 2024-11-20T04:29:30,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741838_1014 (size=5275) 2024-11-20T04:29:30,646 INFO [M:0;c2a32e16c274:34353 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a717859af84045ca912bf87ff0c01acf 2024-11-20T04:29:30,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:29:30,662 INFO [RS:0;c2a32e16c274:42229 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T04:29:30,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42229-0x10133a304400001, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-20T04:29:30,662 INFO [RS:0;c2a32e16c274:42229 {}] regionserver.HRegionServer(1031): Exiting; stopping=c2a32e16c274,42229,1732076968894; zookeeper connection closed. 2024-11-20T04:29:30,662 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@46242ee7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@46242ee7 2024-11-20T04:29:30,663 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T04:29:30,678 DEBUG [M:0;c2a32e16c274:34353 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4a8dec46acc543729027b8967e2d384c is 69, key is c2a32e16c274,42229,1732076968894/rs:state/1732076969571/Put/seqid=0 2024-11-20T04:29:30,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741839_1015 (size=5156) 2024-11-20T04:29:30,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741839_1015 (size=5156) 2024-11-20T04:29:30,685 INFO [M:0;c2a32e16c274:34353 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4a8dec46acc543729027b8967e2d384c 2024-11-20T04:29:30,708 DEBUG [M:0;c2a32e16c274:34353 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/33ce246ab931478dafb7e1a298c1eb41 is 52, key is load_balancer_on/state:d/1732076970329/Put/seqid=0 2024-11-20T04:29:30,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741840_1016 (size=5056) 2024-11-20T04:29:30,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741840_1016 (size=5056) 2024-11-20T04:29:30,715 INFO [M:0;c2a32e16c274:34353 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/33ce246ab931478dafb7e1a298c1eb41 2024-11-20T04:29:30,725 DEBUG [M:0;c2a32e16c274:34353 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/489e3912ef8743d6bafdbf73674af383 as hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/489e3912ef8743d6bafdbf73674af383 2024-11-20T04:29:30,733 INFO [M:0;c2a32e16c274:34353 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/489e3912ef8743d6bafdbf73674af383, entries=8, sequenceid=29, filesize=5.5 K 2024-11-20T04:29:30,734 DEBUG 
[M:0;c2a32e16c274:34353 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a717859af84045ca912bf87ff0c01acf as hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a717859af84045ca912bf87ff0c01acf 2024-11-20T04:29:30,742 INFO [M:0;c2a32e16c274:34353 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a717859af84045ca912bf87ff0c01acf, entries=3, sequenceid=29, filesize=5.2 K 2024-11-20T04:29:30,744 DEBUG [M:0;c2a32e16c274:34353 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4a8dec46acc543729027b8967e2d384c as hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4a8dec46acc543729027b8967e2d384c 2024-11-20T04:29:30,753 INFO [M:0;c2a32e16c274:34353 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4a8dec46acc543729027b8967e2d384c, entries=1, sequenceid=29, filesize=5.0 K 2024-11-20T04:29:30,755 DEBUG [M:0;c2a32e16c274:34353 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/33ce246ab931478dafb7e1a298c1eb41 as hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/33ce246ab931478dafb7e1a298c1eb41 2024-11-20T04:29:30,761 INFO [M:0;c2a32e16c274:34353 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46841/user/jenkins/test-data/c0437946-20cd-361d-f86e-26de1d209428/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/33ce246ab931478dafb7e1a298c1eb41, entries=1, sequenceid=29, filesize=4.9 K 2024-11-20T04:29:30,763 INFO [M:0;c2a32e16c274:34353 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 184ms, sequenceid=29, compaction requested=false 2024-11-20T04:29:30,765 INFO [M:0;c2a32e16c274:34353 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:29:30,765 DEBUG [M:0;c2a32e16c274:34353 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732076970579Disabling compacts and flushes for region at 1732076970579Disabling writes for close at 1732076970579Obtaining lock to block concurrent updates at 1732076970579Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732076970579Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732076970580 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732076970581 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732076970581Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732076970597 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732076970597Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732076970610 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732076970637 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732076970637Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732076970654 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732076970677 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732076970677Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732076970691 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732076970707 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732076970707Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c18f4c9: reopening flushed file at 1732076970723 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b340e4e: reopening flushed file at 1732076970733 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bd6162a: reopening flushed file at 1732076970742 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f3ce6da: reopening flushed file at 1732076970753 (+11 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 184ms, sequenceid=29, compaction requested=false at 1732076970763 (+10 ms)Writing region close event to WAL at 1732076970764 (+1 ms)Closed at 1732076970765 (+1 ms) 2024-11-20T04:29:30,765 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,765 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,766 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,766 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,766 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:30,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36463 is added to blk_1073741830_1006 (size=10311) 2024-11-20T04:29:30,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43329 is added to blk_1073741830_1006 (size=10311) 2024-11-20T04:29:30,771 INFO [M:0;c2a32e16c274:34353 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T04:29:30,772 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T04:29:30,772 INFO [M:0;c2a32e16c274:34353 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34353 2024-11-20T04:29:30,773 INFO [M:0;c2a32e16c274:34353 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T04:29:30,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:29:30,875 INFO [M:0;c2a32e16c274:34353 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T04:29:30,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34353-0x10133a304400000, quorum=127.0.0.1:62863, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:29:30,881 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2a489d78{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:30,882 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c67d530{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:29:30,882 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:29:30,883 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c30f553{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:29:30,883 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64e5ce98{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/hadoop.log.dir/,STOPPED} 2024-11-20T04:29:30,886 WARN [BP-2092897856-172.17.0.2-1732076967503 heartbeating to localhost/127.0.0.1:46841 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:29:30,886 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T04:29:30,886 WARN [BP-2092897856-172.17.0.2-1732076967503 heartbeating to localhost/127.0.0.1:46841 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2092897856-172.17.0.2-1732076967503 (Datanode Uuid 257ae809-456c-4bae-91e5-f84c2912eec5) service to localhost/127.0.0.1:46841 2024-11-20T04:29:30,886 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:29:30,887 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/cluster_f8769a7a-d674-b732-1aa4-49a2fba607a2/data/data3/current/BP-2092897856-172.17.0.2-1732076967503 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:30,887 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/cluster_f8769a7a-d674-b732-1aa4-49a2fba607a2/data/data4/current/BP-2092897856-172.17.0.2-1732076967503 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:30,887 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:29:30,891 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@15ba4d19{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:30,891 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4d8874af{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:29:30,891 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:29:30,891 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3270c9ae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:29:30,892 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7247ee1d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/hadoop.log.dir/,STOPPED} 2024-11-20T04:29:30,894 WARN [BP-2092897856-172.17.0.2-1732076967503 heartbeating to localhost/127.0.0.1:46841 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:29:30,894 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T04:29:30,894 WARN [BP-2092897856-172.17.0.2-1732076967503 heartbeating to localhost/127.0.0.1:46841 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2092897856-172.17.0.2-1732076967503 (Datanode Uuid 59363a67-7d98-4b8d-8cb3-6413b247044d) service to localhost/127.0.0.1:46841 2024-11-20T04:29:30,894 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:29:30,895 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/cluster_f8769a7a-d674-b732-1aa4-49a2fba607a2/data/data1/current/BP-2092897856-172.17.0.2-1732076967503 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:30,895 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/cluster_f8769a7a-d674-b732-1aa4-49a2fba607a2/data/data2/current/BP-2092897856-172.17.0.2-1732076967503 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:30,896 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:29:30,903 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4d195093{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T04:29:30,904 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@476d81{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:29:30,904 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:29:30,905 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@630e1a46{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:29:30,905 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ae773f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/hadoop.log.dir/,STOPPED} 2024-11-20T04:29:30,914 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T04:29:30,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T04:29:30,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T04:29:30,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/hadoop.log.dir so I do NOT create it in target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653 2024-11-20T04:29:30,937 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/47d1d4df-3d4a-8d58-5869-0e6be560c27f/hadoop.tmp.dir so I do NOT create it in target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653 2024-11-20T04:29:30,937 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818, deleteOnExit=true 2024-11-20T04:29:30,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T04:29:30,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/test.cache.data in system properties and HBase conf 2024-11-20T04:29:30,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T04:29:30,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir in system properties and HBase conf 2024-11-20T04:29:30,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T04:29:30,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T04:29:30,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T04:29:30,937 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T04:29:30,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T04:29:30,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T04:29:30,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T04:29:30,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T04:29:30,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T04:29:30,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T04:29:30,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T04:29:30,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T04:29:30,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T04:29:30,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/nfs.dump.dir in system properties and HBase conf 2024-11-20T04:29:30,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/java.io.tmpdir in system properties and HBase conf 2024-11-20T04:29:30,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T04:29:30,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T04:29:30,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T04:29:30,956 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T04:29:31,047 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:29:31,054 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:29:31,057 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:29:31,057 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:29:31,057 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T04:29:31,061 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:29:31,061 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35c95cb4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:29:31,062 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5d8a9c69{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:29:31,194 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@56ff2226{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/java.io.tmpdir/jetty-localhost-36387-hadoop-hdfs-3_4_1-tests_jar-_-any-16726578391896649966/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T04:29:31,195 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b5c1b0d{HTTP/1.1, (http/1.1)}{localhost:36387} 2024-11-20T04:29:31,195 INFO [Time-limited test {}] server.Server(415): Started @105273ms 2024-11-20T04:29:31,211 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T04:29:31,224 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T04:29:31,224 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T04:29:31,225 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T04:29:31,225 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-20T04:29:31,313 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:29:31,316 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:29:31,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:29:31,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:29:31,319 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T04:29:31,320 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5917cb43{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:29:31,321 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@259c861e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:29:31,431 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:29:31,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:29:31,445 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3150e6db{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/java.io.tmpdir/jetty-localhost-45893-hadoop-hdfs-3_4_1-tests_jar-_-any-551479197569312902/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:31,446 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@539e83f9{HTTP/1.1, (http/1.1)}{localhost:45893} 2024-11-20T04:29:31,446 INFO [Time-limited test {}] server.Server(415): Started @105525ms 2024-11-20T04:29:31,455 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T04:29:31,522 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T04:29:31,525 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:29:31,543 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:29:31,545 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:29:31,547 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:29:31,564 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:29:31,570 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:29:31,572 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:29:31,572 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:29:31,573 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:29:31,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4549eece{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:29:31,574 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c80aceb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:29:31,620 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data1/current/BP-1675406404-172.17.0.2-1732076970975/current, will proceed with Du for space computation calculation, 2024-11-20T04:29:31,622 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data2/current/BP-1675406404-172.17.0.2-1732076970975/current, will proceed with Du for space computation calculation, 2024-11-20T04:29:31,625 INFO [regionserver/c2a32e16c274:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:29:31,669 WARN [Thread-634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T04:29:31,676 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2459c4784b6f733a with lease ID 0x271a803eb366b81c: Processing first storage report for DS-e8ea5853-cc6d-4305-9215-b019e27df0a1 from datanode DatanodeRegistration(127.0.0.1:36545, datanodeUuid=a43b2b1a-5865-4441-b03b-2a8b632bbbd0, infoPort=38063, infoSecurePort=0, ipcPort=38571, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975) 2024-11-20T04:29:31,676 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2459c4784b6f733a with lease ID 0x271a803eb366b81c: from storage DS-e8ea5853-cc6d-4305-9215-b019e27df0a1 node DatanodeRegistration(127.0.0.1:36545, datanodeUuid=a43b2b1a-5865-4441-b03b-2a8b632bbbd0, infoPort=38063, infoSecurePort=0, ipcPort=38571, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T04:29:31,676 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2459c4784b6f733a with lease ID 0x271a803eb366b81c: Processing first storage report for DS-f379abd8-bad4-427b-9fec-27b115029bb1 from datanode DatanodeRegistration(127.0.0.1:36545, datanodeUuid=a43b2b1a-5865-4441-b03b-2a8b632bbbd0, infoPort=38063, infoSecurePort=0, ipcPort=38571, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975) 2024-11-20T04:29:31,676 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2459c4784b6f733a with lease ID 0x271a803eb366b81c: from storage DS-f379abd8-bad4-427b-9fec-27b115029bb1 node DatanodeRegistration(127.0.0.1:36545, datanodeUuid=a43b2b1a-5865-4441-b03b-2a8b632bbbd0, infoPort=38063, infoSecurePort=0, ipcPort=38571, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:29:31,737 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21a23b22{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/java.io.tmpdir/jetty-localhost-41717-hadoop-hdfs-3_4_1-tests_jar-_-any-3761961951365382003/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:31,738 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21f2acf7{HTTP/1.1, (http/1.1)}{localhost:41717} 2024-11-20T04:29:31,738 INFO [Time-limited test {}] server.Server(415): Started @105816ms 2024-11-20T04:29:31,740 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
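The entries above cover the harness restarting DFS for the new minicluster: NameNode and DataNode HTTP endpoints come up on Jetty, the BlockPoolSlice threads fall back to du because no dfsUsed cache file exists yet, and the NameNode's BlockManager processes the first storage reports from each registered DataNode. A minimal sketch of standing up such an embedded HDFS cluster with the stock MiniDFSCluster builder from the hadoop-hdfs test artifact the log loads (illustrative only, not the HBaseTestingUtil code path that produced this log; the class name MiniDfsSketch is invented):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Key taken from the DirectoryScanner warning above: values above 1000 ms/sec
    // are rejected by the datanode, which then assumes the default of -1.
    conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 1000);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)      // matches numDataNodes=2 in the StartMiniClusterOption above
        .build();
    try {
      cluster.waitActive();   // returns once the datanodes have registered and sent block reports
      FileSystem fs = cluster.getFileSystem();
      System.out.println("embedded DFS ready at " + fs.getUri());
    } finally {
      cluster.shutdown();
    }
  }
}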
2024-11-20T04:29:31,875 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data4/current/BP-1675406404-172.17.0.2-1732076970975/current, will proceed with Du for space computation calculation, 2024-11-20T04:29:31,875 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data3/current/BP-1675406404-172.17.0.2-1732076970975/current, will proceed with Du for space computation calculation, 2024-11-20T04:29:31,903 WARN [Thread-671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T04:29:31,906 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x65b9f9048176e5e9 with lease ID 0x271a803eb366b81d: Processing first storage report for DS-fad33451-7600-49b5-b381-276f45eb9326 from datanode DatanodeRegistration(127.0.0.1:45707, datanodeUuid=83959f4a-849d-48e1-94ea-6cb816411dc3, infoPort=41075, infoSecurePort=0, ipcPort=42511, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975) 2024-11-20T04:29:31,906 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x65b9f9048176e5e9 with lease ID 0x271a803eb366b81d: from storage DS-fad33451-7600-49b5-b381-276f45eb9326 node DatanodeRegistration(127.0.0.1:45707, datanodeUuid=83959f4a-849d-48e1-94ea-6cb816411dc3, infoPort=41075, infoSecurePort=0, ipcPort=42511, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:29:31,906 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x65b9f9048176e5e9 with lease ID 0x271a803eb366b81d: Processing first storage report for DS-267587b6-c838-477b-8f08-6b95c074cf82 from datanode DatanodeRegistration(127.0.0.1:45707, datanodeUuid=83959f4a-849d-48e1-94ea-6cb816411dc3, infoPort=41075, infoSecurePort=0, ipcPort=42511, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975) 2024-11-20T04:29:31,906 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x65b9f9048176e5e9 with lease ID 0x271a803eb366b81d: from storage DS-267587b6-c838-477b-8f08-6b95c074cf82 node DatanodeRegistration(127.0.0.1:45707, datanodeUuid=83959f4a-849d-48e1-94ea-6cb816411dc3, infoPort=41075, infoSecurePort=0, ipcPort=42511, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:29:31,989 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653 2024-11-20T04:29:31,992 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/zookeeper_0, clientPort=53242, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T04:29:31,994 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53242 2024-11-20T04:29:31,994 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:31,996 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:32,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45707 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:29:32,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36545 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:29:32,012 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094 with version=8 2024-11-20T04:29:32,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/hbase-staging 2024-11-20T04:29:32,015 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:29:32,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:32,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:32,015 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:29:32,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:32,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:29:32,016 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T04:29:32,016 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:29:32,017 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42739 2024-11-20T04:29:32,019 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42739 connecting to ZooKeeper ensemble=127.0.0.1:53242 2024-11-20T04:29:32,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:427390x0, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:29:32,029 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42739-0x10133a312670000 connected 2024-11-20T04:29:32,066 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:32,070 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:32,074 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:29:32,074 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094, hbase.cluster.distributed=false 2024-11-20T04:29:32,076 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:29:32,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42739 2024-11-20T04:29:32,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42739 2024-11-20T04:29:32,090 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42739 2024-11-20T04:29:32,093 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42739 2024-11-20T04:29:32,099 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42739 2024-11-20T04:29:32,119 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:29:32,120 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:32,120 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:32,120 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:29:32,120 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:32,120 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:29:32,120 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T04:29:32,120 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:29:32,121 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40241 2024-11-20T04:29:32,123 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40241 connecting to ZooKeeper ensemble=127.0.0.1:53242 2024-11-20T04:29:32,123 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:32,126 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:32,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:402410x0, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:29:32,136 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:29:32,136 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40241-0x10133a312670001 connected 2024-11-20T04:29:32,136 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T04:29:32,141 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T04:29:32,142 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T04:29:32,143 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:29:32,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40241 2024-11-20T04:29:32,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40241 2024-11-20T04:29:32,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40241 2024-11-20T04:29:32,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40241 2024-11-20T04:29:32,151 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40241 
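The RpcExecutor entries above show each service queue (default, priority read/write, replication, metaPriority) being instantiated on a bounded java.util.concurrent.LinkedBlockingQueue and then drained by a fixed group of handler threads, e.g. numCallQueues=1, maxQueueLength=30, handlerCount=3 for default.FPBQ.Fifo. The following plain-JDK sketch illustrates that bounded-queue-plus-handlers pattern; the class and method names are invented for illustration and are not HBase's RpcExecutor API:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class FifoCallQueueSketch {
  private final BlockingQueue<Runnable> callQueue;

  public FifoCallQueueSketch(int maxQueueLength, int handlerCount, String threadPrefix) {
    this.callQueue = new LinkedBlockingQueue<>(maxQueueLength);   // bounded, FIFO
    for (int i = 0; i < handlerCount; i++) {
      Thread handler = new Thread(() -> {
        try {
          while (true) {
            callQueue.take().run();            // block until a queued call arrives, then execute it
          }
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();  // handler shut down
        }
      }, threadPrefix + ".handler-" + i);
      handler.setDaemon(true);
      handler.start();
    }
  }

  // Returns false when the bounded queue is full, i.e. the call is rejected
  // rather than queued, which is what a maxQueueLength limit implies.
  public boolean dispatch(Runnable call) {
    return callQueue.offer(call);
  }
}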
2024-11-20T04:29:32,165 DEBUG [M:0;c2a32e16c274:42739 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c2a32e16c274:42739 2024-11-20T04:29:32,168 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c2a32e16c274,42739,1732076972014 2024-11-20T04:29:32,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:29:32,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:29:32,171 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c2a32e16c274,42739,1732076972014 2024-11-20T04:29:32,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:32,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T04:29:32,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:32,174 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T04:29:32,174 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c2a32e16c274,42739,1732076972014 from backup master directory 2024-11-20T04:29:32,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c2a32e16c274,42739,1732076972014 2024-11-20T04:29:32,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:29:32,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:29:32,176 WARN [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
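The ZKWatcher and ActiveMasterManager entries above follow the master registering itself: it creates a backup-master znode, sets watches on znodes that may not exist yet (/hbase/master, /hbase/running), and both processes then receive NodeCreated / NodeDeleted / NodeChildrenChanged events as those znodes change. A minimal sketch of that watch-then-react pattern using the plain org.apache.zookeeper client (the ensemble address and znode path are copied from the log; the class name is hypothetical and this is not HBase's ZKWatcher implementation):

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    final CountDownLatch created = new CountDownLatch(1);
    Watcher watcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // Mirrors "Received ZooKeeper Event, type=NodeCreated, ... path=/hbase/running".
        if (event.getType() == Watcher.Event.EventType.NodeCreated
            && "/hbase/running".equals(event.getPath())) {
          created.countDown();
        }
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:53242", 30000, watcher);
    // exists() on a missing znode still registers the watch; this is what
    // "Set watcher on znode that does not yet exist, /hbase/running" refers to.
    zk.exists("/hbase/running", watcher);
    created.await();   // unblocks once the active master creates /hbase/running
    zk.close();
  }
}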
2024-11-20T04:29:32,176 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c2a32e16c274,42739,1732076972014 2024-11-20T04:29:32,181 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/hbase.id] with ID: 930d9cac-07d5-45bc-bf1a-b2222d362bc4 2024-11-20T04:29:32,181 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/.tmp/hbase.id 2024-11-20T04:29:32,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45707 is added to blk_1073741826_1002 (size=42) 2024-11-20T04:29:32,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36545 is added to blk_1073741826_1002 (size=42) 2024-11-20T04:29:32,201 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/.tmp/hbase.id]:[hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/hbase.id] 2024-11-20T04:29:32,224 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:32,224 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T04:29:32,226 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
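The FSUtils entries above create the cluster ID file in two steps: write the ID to a temporary location under .tmp/ and then move the temporary file onto its final path, so a reader never observes a half-written hbase.id. A generic sketch of that write-temp-then-rename pattern against the Hadoop FileSystem API (the paths and the ID value are taken from the log; the helper shown is illustrative, not HBase's FSUtils):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  // Write content to tmpPath first, then rename onto finalPath in a single FS call.
  static void writeThenMove(FileSystem fs, Path tmpPath, Path finalPath, String content)
      throws IOException {
    try (FSDataOutputStream out = fs.create(tmpPath, true)) {
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmpPath, finalPath)) {
      throw new IOException("rename " + tmpPath + " -> " + finalPath + " failed");
    }
  }

  public static void main(String[] args) throws Exception {
    // fs.defaultFS would point at hdfs://localhost:40051 in the run above.
    FileSystem fs = FileSystem.get(new Configuration());
    Path root = new Path("/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094");
    writeThenMove(fs, new Path(root, ".tmp/hbase.id"), new Path(root, "hbase.id"),
        "930d9cac-07d5-45bc-bf1a-b2222d362bc4");   // cluster ID reported in the log
  }
}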
2024-11-20T04:29:32,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:32,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:32,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45707 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:29:32,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36545 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:29:32,248 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T04:29:32,249 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T04:29:32,249 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:29:32,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45707 is added to blk_1073741828_1004 (size=1189) 2024-11-20T04:29:32,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36545 is added to blk_1073741828_1004 (size=1189) 2024-11-20T04:29:32,261 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store 2024-11-20T04:29:32,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36545 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:29:32,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45707 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:29:32,276 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:29:32,276 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T04:29:32,277 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:29:32,277 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:29:32,277 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T04:29:32,277 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:29:32,277 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
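The MasterRegion entries above spell out the layout of the local 'master:store' table: four column families (info, proc, rs, state), with only 'info' keeping three versions, using ROW_INDEX_V1 block encoding, a ROWCOL bloom filter, an 8 KB block size and in-memory placement, while the other families use the 64 KB default. A sketch of an equivalent descriptor built with the public HBase client builders (these builder and setter names are the standard client API to the best of my knowledge and should be checked against the exact HBase version in use; only the 'info' and 'proc' families are shown for brevity):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                   // VERSIONS => '3'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)                                   // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                              // BLOCKSIZE => '8192 B (8KB)'
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                   // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)                   // BLOOMFILTER => 'ROW'
            .build())                                            // 64 KB block size is the default
        .build();
  }
}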
2024-11-20T04:29:32,277 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732076972276Disabling compacts and flushes for region at 1732076972276Disabling writes for close at 1732076972277 (+1 ms)Writing region close event to WAL at 1732076972277Closed at 1732076972277 2024-11-20T04:29:32,278 WARN [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/.initializing 2024-11-20T04:29:32,278 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/WALs/c2a32e16c274,42739,1732076972014 2024-11-20T04:29:32,282 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C42739%2C1732076972014, suffix=, logDir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/WALs/c2a32e16c274,42739,1732076972014, archiveDir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/oldWALs, maxLogs=10 2024-11-20T04:29:32,283 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C42739%2C1732076972014.1732076972283 2024-11-20T04:29:32,293 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/WALs/c2a32e16c274,42739,1732076972014/c2a32e16c274%2C42739%2C1732076972014.1732076972283 2024-11-20T04:29:32,298 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41075:41075),(127.0.0.1/127.0.0.1:38063:38063)] 2024-11-20T04:29:32,299 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:29:32,299 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:29:32,299 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:32,299 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:32,301 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:32,303 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T04:29:32,303 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:32,304 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:32,304 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:32,306 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T04:29:32,306 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:32,306 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:29:32,307 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:32,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T04:29:32,309 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:32,309 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:29:32,309 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:32,311 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T04:29:32,312 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:32,317 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:29:32,317 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:32,318 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:32,318 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:32,320 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:32,320 DEBUG [master/c2a32e16c274:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:32,321 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T04:29:32,323 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:29:32,325 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:29:32,326 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860650, jitterRate=0.09437361359596252}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T04:29:32,327 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732076972299Initializing all the Stores at 1732076972301 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076972301Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076972301Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076972301Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076972301Cleaning up temporary data from old regions at 1732076972321 (+20 ms)Region opened successfully at 1732076972327 (+6 ms) 2024-11-20T04:29:32,327 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T04:29:32,331 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28e6e824, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:29:32,332 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T04:29:32,332 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T04:29:32,333 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T04:29:32,333 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T04:29:32,335 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-20T04:29:32,335 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T04:29:32,335 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T04:29:32,339 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T04:29:32,339 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T04:29:32,352 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T04:29:32,353 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T04:29:32,354 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T04:29:32,359 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T04:29:32,360 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T04:29:32,361 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T04:29:32,366 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T04:29:32,368 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T04:29:32,371 DEBUG 
[master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T04:29:32,373 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T04:29:32,375 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T04:29:32,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T04:29:32,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:32,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T04:29:32,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:32,380 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c2a32e16c274,42739,1732076972014, sessionid=0x10133a312670000, setting cluster-up flag (Was=false) 2024-11-20T04:29:32,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:32,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:32,394 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T04:29:32,395 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2a32e16c274,42739,1732076972014 2024-11-20T04:29:32,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:32,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:32,410 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T04:29:32,411 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2a32e16c274,42739,1732076972014 2024-11-20T04:29:32,413 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T04:29:32,415 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T04:29:32,415 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T04:29:32,415 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T04:29:32,415 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c2a32e16c274,42739,1732076972014 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T04:29:32,417 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:29:32,417 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:29:32,417 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:29:32,417 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:29:32,417 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c2a32e16c274:0, corePoolSize=10, maxPoolSize=10 2024-11-20T04:29:32,417 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:32,417 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:29:32,417 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c2a32e16c274:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T04:29:32,426 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:29:32,426 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T04:29:32,428 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:32,428 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T04:29:32,432 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732077002432 2024-11-20T04:29:32,433 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T04:29:32,433 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T04:29:32,433 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T04:29:32,433 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T04:29:32,433 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T04:29:32,433 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T04:29:32,436 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:32,437 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T04:29:32,437 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T04:29:32,437 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T04:29:32,438 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T04:29:32,438 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T04:29:32,440 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732076972438,5,FailOnTimeoutGroup] 2024-11-20T04:29:32,441 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732076972440,5,FailOnTimeoutGroup] 2024-11-20T04:29:32,441 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:32,441 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T04:29:32,441 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:32,441 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
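
The hbase:meta table descriptor created a little above (FSTableDescriptors, 04:29:32,428) gives its 'info' family ROWCOL bloom filters, in-memory caching, ROW_INDEX_V1 block encoding and 8 KB blocks. The sketch below restates those same attributes through the public client API; it is an illustration of the logged settings only, not how FSTableDescriptors assembles the descriptor internally, and the class and method names are made up for the example.

    // Illustration only: the 'info' family attributes logged for hbase:meta,
    // expressed with the standard HBase client builder (hbase-client on the
    // classpath). The values are copied from the descriptor logged above.
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaInfoFamilySketch {
      public static ColumnFamilyDescriptor infoFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
            .build();
      }
    }
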
2024-11-20T04:29:32,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45707 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:29:32,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36545 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:29:32,453 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T04:29:32,453 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094 2024-11-20T04:29:32,458 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(746): ClusterId : 930d9cac-07d5-45bc-bf1a-b2222d362bc4 2024-11-20T04:29:32,458 DEBUG [RS:0;c2a32e16c274:40241 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T04:29:32,462 DEBUG [RS:0;c2a32e16c274:40241 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T04:29:32,462 DEBUG [RS:0;c2a32e16c274:40241 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T04:29:32,464 DEBUG [RS:0;c2a32e16c274:40241 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T04:29:32,465 DEBUG [RS:0;c2a32e16c274:40241 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d00c3aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:29:32,479 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45707 is added to blk_1073741832_1008 (size=32) 2024-11-20T04:29:32,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36545 is added to blk_1073741832_1008 (size=32) 2024-11-20T04:29:32,481 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:29:32,488 DEBUG [RS:0;c2a32e16c274:40241 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c2a32e16c274:40241 2024-11-20T04:29:32,488 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T04:29:32,488 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T04:29:32,489 DEBUG [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-20T04:29:32,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T04:29:32,490 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(2659): reportForDuty to master=c2a32e16c274,42739,1732076972014 with port=40241, startcode=1732076972119 2024-11-20T04:29:32,491 DEBUG [RS:0;c2a32e16c274:40241 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T04:29:32,491 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T04:29:32,491 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:32,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:32,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T04:29:32,494 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T04:29:32,494 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:32,495 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:32,495 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T04:29:32,497 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T04:29:32,497 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:32,497 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55507, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T04:29:32,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:32,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T04:29:32,498 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42739 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c2a32e16c274,40241,1732076972119 2024-11-20T04:29:32,498 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42739 {}] master.ServerManager(517): Registering regionserver=c2a32e16c274,40241,1732076972119 2024-11-20T04:29:32,499 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T04:29:32,499 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:32,500 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:32,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T04:29:32,501 DEBUG [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094 2024-11-20T04:29:32,501 DEBUG [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40051 2024-11-20T04:29:32,501 DEBUG [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T04:29:32,501 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740 2024-11-20T04:29:32,501 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740 2024-11-20T04:29:32,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:29:32,503 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T04:29:32,503 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T04:29:32,504 DEBUG [RS:0;c2a32e16c274:40241 {}] zookeeper.ZKUtil(111): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c2a32e16c274,40241,1732076972119 2024-11-20T04:29:32,504 WARN [RS:0;c2a32e16c274:40241 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T04:29:32,504 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
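
The CompactionConfiguration lines repeated for each store above (minCompactSize 128 MB, files [3,10), ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5) are all derived from site configuration rather than hard-coded. A minimal sketch of reading them, assuming the commonly documented hbase-site.xml key names (the keys are an assumption, not taken from this log) and using the logged values as defaults:

    // Sketch: where the CompactionConfiguration values logged above usually
    // come from. Key names are the commonly documented ones and are assumed
    // here; the defaults mirror the values printed in the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        int minFiles  = conf.getInt("hbase.hstore.compaction.min", 3);
        int maxFiles  = conf.getInt("hbase.hstore.compaction.max", 10);
        float ratio   = conf.getFloat("hbase.hstore.compaction.ratio", 1.2F);
        float offPeak = conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
        long majorMs  = conf.getLong("hbase.hregion.majorcompaction", 604_800_000L);
        float jitter  = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5F);
        System.out.printf("files [%d,%d) ratio %.1f offPeak %.1f major %dms jitter %.1f%n",
            minFiles, maxFiles, ratio, offPeak, majorMs, jitter);
      }
    }
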
2024-11-20T04:29:32,504 INFO [RS:0;c2a32e16c274:40241 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:29:32,504 DEBUG [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119 2024-11-20T04:29:32,504 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c2a32e16c274,40241,1732076972119] 2024-11-20T04:29:32,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T04:29:32,509 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:29:32,510 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=765966, jitterRate=-0.026024073362350464}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T04:29:32,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732076972481Initializing all the Stores at 1732076972482 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076972482Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076972489 (+7 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076972489Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076972489Cleaning up temporary data from old regions at 1732076972503 (+14 ms)Region opened successfully at 1732076972511 (+8 ms) 2024-11-20T04:29:32,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T04:29:32,511 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T04:29:32,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T04:29:32,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired 
close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T04:29:32,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T04:29:32,512 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T04:29:32,512 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732076972511Disabling compacts and flushes for region at 1732076972511Disabling writes for close at 1732076972511Writing region close event to WAL at 1732076972511Closed at 1732076972512 (+1 ms) 2024-11-20T04:29:32,512 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T04:29:32,513 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:29:32,513 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T04:29:32,513 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T04:29:32,515 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T04:29:32,517 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T04:29:32,520 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T04:29:32,521 INFO [RS:0;c2a32e16c274:40241 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T04:29:32,521 INFO [RS:0;c2a32e16c274:40241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:32,521 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T04:29:32,523 INFO [RS:0;c2a32e16c274:40241 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T04:29:32,523 INFO [RS:0;c2a32e16c274:40241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
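
The MemStoreFlusher line above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M: the low mark is 95% of the global limit, which in turn is a fraction of the test JVM heap (880 MB at a 0.4 fraction implies roughly a 2200 MB heap). A small sketch of that relationship, assuming the commonly documented keys and defaults; the key names and heap figure are assumptions, not values read from this log:

    // Sketch of the arithmetic behind the MemStoreFlusher limits above:
    // heap * 0.4 gives the global limit, and global * 0.95 gives the low
    // mark (880 MB * 0.95 = 836 MB, matching the logged values).
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long heapMb = Runtime.getRuntime().maxMemory() / (1024 * 1024);
        float globalFraction = conf.getFloat("hbase.regionserver.global.memstore.size", 0.4F);
        float lowerFraction  = conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95F);
        long globalLimitMb = (long) (heapMb * globalFraction);
        long lowMarkMb     = (long) (globalLimitMb * lowerFraction);
        System.out.println("globalMemStoreLimit=" + globalLimitMb + " M, lowMark=" + lowMarkMb + " M");
      }
    }
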
2024-11-20T04:29:32,523 DEBUG [RS:0;c2a32e16c274:40241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:32,523 DEBUG [RS:0;c2a32e16c274:40241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:32,523 DEBUG [RS:0;c2a32e16c274:40241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:32,523 DEBUG [RS:0;c2a32e16c274:40241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:32,523 DEBUG [RS:0;c2a32e16c274:40241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:32,523 DEBUG [RS:0;c2a32e16c274:40241 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:29:32,523 DEBUG [RS:0;c2a32e16c274:40241 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:32,523 DEBUG [RS:0;c2a32e16c274:40241 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:32,523 DEBUG [RS:0;c2a32e16c274:40241 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:32,523 DEBUG [RS:0;c2a32e16c274:40241 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:32,524 DEBUG [RS:0;c2a32e16c274:40241 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:32,524 DEBUG [RS:0;c2a32e16c274:40241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:32,524 DEBUG [RS:0;c2a32e16c274:40241 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:29:32,524 DEBUG [RS:0;c2a32e16c274:40241 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:29:32,529 INFO [RS:0;c2a32e16c274:40241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:32,529 INFO [RS:0;c2a32e16c274:40241 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:32,529 INFO [RS:0;c2a32e16c274:40241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:32,529 INFO [RS:0;c2a32e16c274:40241 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
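
Each "Starting executor service" line above describes a fixed-size pool (corePoolSize equal to maxPoolSize), and the RemoteProcedureDispatcher earlier in the log additionally allows its core threads to time out. A plain-JDK illustration of that pool shape follows; this is java.util.concurrent, not HBase's own executor.ExecutorService wrapper, and the 60-second idle timeout is an arbitrary value chosen for the sketch.

    // Generic JDK sketch of the pool shape the executor lines above describe:
    // fixed size (core == max) with core threads allowed to time out.
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class FixedPoolSketch {
      public static ThreadPoolExecutor pool(int coreAndMax) {
        ThreadPoolExecutor e = new ThreadPoolExecutor(
            coreAndMax, coreAndMax,                 // corePoolSize == maxPoolSize
            60L, TimeUnit.SECONDS,                  // idle timeout for the sketch
            new LinkedBlockingQueue<Runnable>());
        e.allowCoreThreadTimeOut(true);             // as in allowCoreThreadTimeOut=true
        return e;
      }
    }
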
2024-11-20T04:29:32,529 INFO [RS:0;c2a32e16c274:40241 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:32,529 INFO [RS:0;c2a32e16c274:40241 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,40241,1732076972119-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T04:29:32,557 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T04:29:32,558 INFO [RS:0;c2a32e16c274:40241 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,40241,1732076972119-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:32,558 INFO [RS:0;c2a32e16c274:40241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:32,558 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.Replication(171): c2a32e16c274,40241,1732076972119 started 2024-11-20T04:29:32,582 INFO [RS:0;c2a32e16c274:40241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:32,582 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(1482): Serving as c2a32e16c274,40241,1732076972119, RpcServer on c2a32e16c274/172.17.0.2:40241, sessionid=0x10133a312670001 2024-11-20T04:29:32,583 DEBUG [RS:0;c2a32e16c274:40241 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T04:29:32,583 DEBUG [RS:0;c2a32e16c274:40241 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c2a32e16c274,40241,1732076972119 2024-11-20T04:29:32,583 DEBUG [RS:0;c2a32e16c274:40241 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,40241,1732076972119' 2024-11-20T04:29:32,583 DEBUG [RS:0;c2a32e16c274:40241 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T04:29:32,583 DEBUG [RS:0;c2a32e16c274:40241 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T04:29:32,584 DEBUG [RS:0;c2a32e16c274:40241 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T04:29:32,584 DEBUG [RS:0;c2a32e16c274:40241 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T04:29:32,584 DEBUG [RS:0;c2a32e16c274:40241 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c2a32e16c274,40241,1732076972119 2024-11-20T04:29:32,584 DEBUG [RS:0;c2a32e16c274:40241 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,40241,1732076972119' 2024-11-20T04:29:32,584 DEBUG [RS:0;c2a32e16c274:40241 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T04:29:32,585 DEBUG [RS:0;c2a32e16c274:40241 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T04:29:32,585 DEBUG [RS:0;c2a32e16c274:40241 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T04:29:32,585 INFO [RS:0;c2a32e16c274:40241 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T04:29:32,585 INFO [RS:0;c2a32e16c274:40241 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-20T04:29:32,667 WARN [c2a32e16c274:42739 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-20T04:29:32,689 INFO [RS:0;c2a32e16c274:40241 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C40241%2C1732076972119, suffix=, logDir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119, archiveDir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/oldWALs, maxLogs=32 2024-11-20T04:29:32,690 INFO [RS:0;c2a32e16c274:40241 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C40241%2C1732076972119.1732076972690 2024-11-20T04:29:32,704 INFO [RS:0;c2a32e16c274:40241 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 2024-11-20T04:29:32,718 DEBUG [RS:0;c2a32e16c274:40241 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41075:41075),(127.0.0.1/127.0.0.1:38063:38063)] 2024-11-20T04:29:32,917 DEBUG [c2a32e16c274:42739 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T04:29:32,918 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c2a32e16c274,40241,1732076972119 2024-11-20T04:29:32,920 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2a32e16c274,40241,1732076972119, state=OPENING 2024-11-20T04:29:32,926 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T04:29:32,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:32,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:29:32,931 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:29:32,932 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:29:32,932 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T04:29:32,933 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c2a32e16c274,40241,1732076972119}] 2024-11-20T04:29:33,087 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T04:29:33,090 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40511, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T04:29:33,139 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T04:29:33,140 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:29:33,143 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C40241%2C1732076972119.meta, suffix=.meta, logDir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119, archiveDir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/oldWALs, maxLogs=32 2024-11-20T04:29:33,144 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta 2024-11-20T04:29:33,159 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta 2024-11-20T04:29:33,163 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41075:41075),(127.0.0.1/127.0.0.1:38063:38063)] 2024-11-20T04:29:33,165 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:29:33,166 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T04:29:33,166 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T04:29:33,166 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
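
The OpenRegionProcedure dispatched above has the region server begin opening hbase:meta (its .meta WAL is created and the MultiRowMutationEndpoint coprocessor is loaded). Once the open completes further down and the location is published under /hbase/meta-region-server, a client can reach this mini cluster through the ZooKeeper quorum the test logs (127.0.0.1:53242) and scan meta like any other table. The sketch below is purely illustrative; in a test the Connection would normally come from the testing utility rather than be hand-built, and the class name is invented for the example.

    // Client-side sketch: connect via the logged ZooKeeper quorum and scan
    // hbase:meta, which will contain the region rows written during assignment.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class MetaScanSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "53242");  // port from the log above
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner = meta.getScanner(new Scan())) {
          for (Result r : scanner) {
            System.out.println(r);
          }
        }
      }
    }
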
2024-11-20T04:29:33,166 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T04:29:33,166 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:29:33,166 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T04:29:33,167 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T04:29:33,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T04:29:33,176 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T04:29:33,176 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:33,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:33,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T04:29:33,178 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T04:29:33,178 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:33,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:33,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T04:29:33,180 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T04:29:33,180 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:33,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:29:33,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T04:29:33,182 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T04:29:33,182 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:33,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-20T04:29:33,183 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T04:29:33,184 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740 2024-11-20T04:29:33,185 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740 2024-11-20T04:29:33,187 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T04:29:33,187 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T04:29:33,188 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T04:29:33,190 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T04:29:33,191 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830704, jitterRate=0.05629540979862213}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T04:29:33,191 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T04:29:33,192 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732076973167Writing region info on filesystem at 1732076973167Initializing all the Stores at 1732076973173 (+6 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076973173Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076973174 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076973174Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732076973174Cleaning up temporary data from old regions at 1732076973187 (+13 ms)Running coprocessor post-open hooks at 1732076973191 (+4 ms)Region opened successfully at 1732076973192 (+1 ms) 2024-11-20T04:29:33,193 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732076973087 2024-11-20T04:29:33,197 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T04:29:33,197 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T04:29:33,198 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c2a32e16c274,40241,1732076972119 2024-11-20T04:29:33,199 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2a32e16c274,40241,1732076972119, state=OPEN 2024-11-20T04:29:33,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T04:29:33,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T04:29:33,205 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c2a32e16c274,40241,1732076972119 2024-11-20T04:29:33,205 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:29:33,205 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:29:33,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T04:29:33,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c2a32e16c274,40241,1732076972119 in 272 msec 2024-11-20T04:29:33,211 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T04:29:33,211 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 695 msec 2024-11-20T04:29:33,212 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:29:33,212 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T04:29:33,214 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T04:29:33,214 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2a32e16c274,40241,1732076972119, seqNum=-1] 2024-11-20T04:29:33,215 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T04:29:33,217 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34109, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T04:29:33,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 808 msec 2024-11-20T04:29:33,224 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732076973224, completionTime=-1 2024-11-20T04:29:33,224 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T04:29:33,224 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T04:29:33,226 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T04:29:33,226 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732077033226 2024-11-20T04:29:33,226 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732077093226 2024-11-20T04:29:33,226 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-20T04:29:33,227 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,42739,1732076972014-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:33,227 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,42739,1732076972014-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:33,227 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,42739,1732076972014-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:33,227 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c2a32e16c274:42739, period=300000, unit=MILLISECONDS is enabled. 
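
The first entry above is InitMetaProcedure creating the built-in 'default' and 'hbase' namespaces once hbase:meta is online; user namespaces go through the same Admin API. A minimal sketch, with the 'demo_ns' name invented for illustration (the built-in namespaces already exist, so clients never create them):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Create a user namespace; 'default' and 'hbase' are created by InitMetaProcedure.
          admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
          // List what exists, which should include the two built-in namespaces.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
        }
      }
    }
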
2024-11-20T04:29:33,227 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:33,227 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:33,229 DEBUG [master/c2a32e16c274:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T04:29:33,231 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.055sec 2024-11-20T04:29:33,231 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T04:29:33,231 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T04:29:33,231 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T04:29:33,231 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T04:29:33,231 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T04:29:33,231 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,42739,1732076972014-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T04:29:33,231 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,42739,1732076972014-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T04:29:33,234 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T04:29:33,234 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T04:29:33,234 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,42739,1732076972014-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
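
The master features and chores listed above are driven by ordinary configuration keys: quota support is reported disabled, and the BalancerChore and RegionNormalizerChore run on 300000 ms periods. A small sketch of the corresponding settings, with the values copied from the log purely for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MasterChoreConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // "Quota support disabled" above: quotas are gated by this flag.
        conf.setBoolean("hbase.quota.enabled", false);
        // BalancerChore and RegionNormalizerChore periods (300000 ms in the log).
        conf.setInt("hbase.balancer.period", 300000);
        conf.setInt("hbase.normalizer.period", 300000);
        return conf;
      }
    }
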
2024-11-20T04:29:33,258 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b274db6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:29:33,258 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c2a32e16c274,42739,-1 for getting cluster id 2024-11-20T04:29:33,259 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T04:29:33,260 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '930d9cac-07d5-45bc-bf1a-b2222d362bc4' 2024-11-20T04:29:33,261 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T04:29:33,261 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "930d9cac-07d5-45bc-bf1a-b2222d362bc4" 2024-11-20T04:29:33,261 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2db3caf8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:29:33,261 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c2a32e16c274,42739,-1] 2024-11-20T04:29:33,262 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T04:29:33,262 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:29:33,263 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59736, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T04:29:33,264 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7feb24a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:29:33,265 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T04:29:33,266 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2a32e16c274,40241,1732076972119, seqNum=-1] 2024-11-20T04:29:33,266 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T04:29:33,267 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47414, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T04:29:33,269 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c2a32e16c274,42739,1732076972014 2024-11-20T04:29:33,270 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:33,273 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T04:29:33,296 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:29:33,296 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:33,296 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:33,296 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:29:33,296 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:29:33,296 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:29:33,296 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T04:29:33,296 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:29:33,297 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41341 2024-11-20T04:29:33,299 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41341 connecting to ZooKeeper ensemble=127.0.0.1:53242 2024-11-20T04:29:33,300 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:33,302 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:29:33,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413410x0, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:29:33,307 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:413410x0, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-20T04:29:33,307 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41341-0x10133a312670002 connected 2024-11-20T04:29:33,307 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-20T04:29:33,308 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T04:29:33,309 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T04:29:33,309 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:41341-0x10133a312670002, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T04:29:33,311 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41341-0x10133a312670002, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:29:33,311 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41341 2024-11-20T04:29:33,312 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41341 2024-11-20T04:29:33,312 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41341 2024-11-20T04:29:33,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41341 2024-11-20T04:29:33,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41341 2024-11-20T04:29:33,317 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.HRegionServer(746): ClusterId : 930d9cac-07d5-45bc-bf1a-b2222d362bc4 2024-11-20T04:29:33,317 DEBUG [RS:1;c2a32e16c274:41341 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T04:29:33,319 DEBUG [RS:1;c2a32e16c274:41341 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T04:29:33,319 DEBUG [RS:1;c2a32e16c274:41341 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T04:29:33,322 DEBUG [RS:1;c2a32e16c274:41341 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T04:29:33,323 DEBUG [RS:1;c2a32e16c274:41341 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@768daa7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:29:33,336 DEBUG [RS:1;c2a32e16c274:41341 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;c2a32e16c274:41341 2024-11-20T04:29:33,336 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T04:29:33,336 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T04:29:33,336 DEBUG [RS:1;c2a32e16c274:41341 {}] regionserver.HRegionServer(832): About to register with Master. 
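
This block is the test bringing up a second region server (RS:1): its RPC executors (default, priority read/write, replication, metaPriority) are created with handlerCount=3, the server binds to port 41341, and it joins the same ZooKeeper ensemble before registering with the master. The handler counts seen here are configuration-driven; a hedged sketch of the most relevant keys follows (the exact read/write split of the priority queues also depends on version-specific defaults, so treat the read-ratio line as an assumption):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcExecutorConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // handlerCount=3 in the executors above comes from this key (default is 30).
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Read/write split of the RWQ call queues is governed by the call-queue read ratio.
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
        return conf;
      }
    }
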
2024-11-20T04:29:33,337 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.HRegionServer(2659): reportForDuty to master=c2a32e16c274,42739,1732076972014 with port=41341, startcode=1732076973295 2024-11-20T04:29:33,337 DEBUG [RS:1;c2a32e16c274:41341 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T04:29:33,339 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50345, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T04:29:33,340 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42739 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c2a32e16c274,41341,1732076973295 2024-11-20T04:29:33,340 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42739 {}] master.ServerManager(517): Registering regionserver=c2a32e16c274,41341,1732076973295 2024-11-20T04:29:33,342 DEBUG [RS:1;c2a32e16c274:41341 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094 2024-11-20T04:29:33,343 DEBUG [RS:1;c2a32e16c274:41341 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40051 2024-11-20T04:29:33,343 DEBUG [RS:1;c2a32e16c274:41341 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T04:29:33,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:29:33,346 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c2a32e16c274,41341,1732076973295] 2024-11-20T04:29:33,346 DEBUG [RS:1;c2a32e16c274:41341 {}] zookeeper.ZKUtil(111): regionserver:41341-0x10133a312670002, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c2a32e16c274,41341,1732076973295 2024-11-20T04:29:33,346 WARN [RS:1;c2a32e16c274:41341 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T04:29:33,346 INFO [RS:1;c2a32e16c274:41341 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:29:33,346 DEBUG [RS:1;c2a32e16c274:41341 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295 2024-11-20T04:29:33,352 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T04:29:33,355 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T04:29:33,355 INFO [RS:1;c2a32e16c274:41341 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T04:29:33,356 INFO [RS:1;c2a32e16c274:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
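
RS:1 then sizes its MemStore flusher (880 M global limit with an 836 M low-water mark, i.e. 95% of the limit) and its PressureAwareCompactionThroughputController (100 MB/s upper and 50 MB/s lower bound). These figures map onto standard configuration keys; the sketch below simply restates the logged values through them, for illustration only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndCompactionTuningSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // globalMemStoreLimit is a fraction of the region server heap;
        // the low-water mark is a fraction of that limit (836 M / 880 M = 0.95 above).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // PressureAwareCompactionThroughputController bounds (100 MB/s and 50 MB/s in the log).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }
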
2024-11-20T04:29:33,356 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T04:29:33,357 INFO [RS:1;c2a32e16c274:41341 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T04:29:33,357 INFO [RS:1;c2a32e16c274:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:33,357 DEBUG [RS:1;c2a32e16c274:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:33,357 DEBUG [RS:1;c2a32e16c274:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:33,357 DEBUG [RS:1;c2a32e16c274:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:33,358 DEBUG [RS:1;c2a32e16c274:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:33,358 DEBUG [RS:1;c2a32e16c274:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:33,358 DEBUG [RS:1;c2a32e16c274:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:29:33,358 DEBUG [RS:1;c2a32e16c274:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:33,358 DEBUG [RS:1;c2a32e16c274:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:33,358 DEBUG [RS:1;c2a32e16c274:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:33,358 DEBUG [RS:1;c2a32e16c274:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:33,358 DEBUG [RS:1;c2a32e16c274:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:33,358 DEBUG [RS:1;c2a32e16c274:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:29:33,358 DEBUG [RS:1;c2a32e16c274:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:29:33,358 DEBUG [RS:1;c2a32e16c274:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:29:33,361 INFO [RS:1;c2a32e16c274:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
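
The RS_OPEN_REGION, RS_CLOSE_REGION, RS_LOG_REPLAY_OPS and related executor services above are plain thread pools whose sizes are configurable. A hedged sketch of two of the corresponding keys (the pool sizes here mirror the corePoolSize values in the log and are set only for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerExecutorConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Pool size of the RS_OPEN_REGION executor (corePoolSize=1 above).
        conf.setInt("hbase.regionserver.executor.openregion.threads", 1);
        // Pool size of the RS_CLOSE_REGION executor (corePoolSize=1 above).
        conf.setInt("hbase.regionserver.executor.closeregion.threads", 1);
        return conf;
      }
    }
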
2024-11-20T04:29:33,361 INFO [RS:1;c2a32e16c274:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:33,361 INFO [RS:1;c2a32e16c274:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:33,361 INFO [RS:1;c2a32e16c274:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:33,361 INFO [RS:1;c2a32e16c274:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:33,361 INFO [RS:1;c2a32e16c274:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,41341,1732076973295-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T04:29:33,378 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T04:29:33,378 INFO [RS:1;c2a32e16c274:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,41341,1732076973295-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:33,378 INFO [RS:1;c2a32e16c274:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:33,379 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.Replication(171): c2a32e16c274,41341,1732076973295 started 2024-11-20T04:29:33,394 INFO [RS:1;c2a32e16c274:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:29:33,394 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.HRegionServer(1482): Serving as c2a32e16c274,41341,1732076973295, RpcServer on c2a32e16c274/172.17.0.2:41341, sessionid=0x10133a312670002 2024-11-20T04:29:33,395 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;c2a32e16c274:41341,5,FailOnTimeoutGroup] 2024-11-20T04:29:33,395 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-20T04:29:33,396 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T04:29:33,396 DEBUG [RS:1;c2a32e16c274:41341 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T04:29:33,396 DEBUG [RS:1;c2a32e16c274:41341 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c2a32e16c274,41341,1732076973295 2024-11-20T04:29:33,396 DEBUG [RS:1;c2a32e16c274:41341 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,41341,1732076973295' 2024-11-20T04:29:33,396 DEBUG [RS:1;c2a32e16c274:41341 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T04:29:33,397 DEBUG [RS:1;c2a32e16c274:41341 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T04:29:33,397 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is c2a32e16c274,42739,1732076972014 2024-11-20T04:29:33,397 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@f44dd06 2024-11-20T04:29:33,398 DEBUG 
[RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T04:29:33,398 DEBUG [RS:1;c2a32e16c274:41341 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T04:29:33,398 DEBUG [RS:1;c2a32e16c274:41341 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T04:29:33,398 DEBUG [RS:1;c2a32e16c274:41341 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c2a32e16c274,41341,1732076973295 2024-11-20T04:29:33,398 DEBUG [RS:1;c2a32e16c274:41341 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,41341,1732076973295' 2024-11-20T04:29:33,398 DEBUG [RS:1;c2a32e16c274:41341 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T04:29:33,399 DEBUG [RS:1;c2a32e16c274:41341 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T04:29:33,399 DEBUG [RS:1;c2a32e16c274:41341 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T04:29:33,399 INFO [RS:1;c2a32e16c274:41341 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T04:29:33,399 INFO [RS:1;c2a32e16c274:41341 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T04:29:33,403 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59740, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T04:29:33,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42739 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T04:29:33,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42739 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
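
TableDescriptorChecker warns here because the incoming descriptor carries a MAX_FILESIZE of 786432 bytes (768 KB) and a MEMSTORE_FLUSHSIZE of 8192 bytes (8 KB), far below the sanity thresholds; a log-rolling test presumably lowers them on purpose to force frequent flushes and splits. A sketch of how such a descriptor is built with the client API (an illustration, not the test's actual code):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class TinyFlushTableSketch {
      // Produces the descriptor-level values that trigger the warnings above.
      static TableDescriptor build(TableName name) {
        return TableDescriptorBuilder.newBuilder(name)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setMaxFileSize(786432)        // MAX_FILESIZE from the warning (768 KB)
            .setMemStoreFlushSize(8192)    // MEMSTORE_FLUSHSIZE from the warning (8 KB)
            .build();
      }
    }
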
2024-11-20T04:29:33,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42739 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T04:29:33,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42739 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-20T04:29:33,409 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T04:29:33,409 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:33,409 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42739 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-20T04:29:33,410 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T04:29:33,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42739 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T04:29:33,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36545 is added to blk_1073741835_1011 (size=393) 2024-11-20T04:29:33,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45707 is added to blk_1073741835_1011 (size=393) 2024-11-20T04:29:33,419 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d9eda11a98fdcfdd972f5f79618c0ab6, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094 2024-11-20T04:29:33,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36545 is added to blk_1073741836_1012 (size=76) 2024-11-20T04:29:33,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45707 is added to blk_1073741836_1012 (size=76) 2024-11-20T04:29:33,430 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:29:33,430 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing d9eda11a98fdcfdd972f5f79618c0ab6, disabling compactions & flushes 2024-11-20T04:29:33,430 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. 2024-11-20T04:29:33,430 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. 2024-11-20T04:29:33,430 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. after waiting 0 ms 2024-11-20T04:29:33,430 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. 2024-11-20T04:29:33,430 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. 2024-11-20T04:29:33,430 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for d9eda11a98fdcfdd972f5f79618c0ab6: Waiting for close lock at 1732076973430Disabling compacts and flushes for region at 1732076973430Disabling writes for close at 1732076973430Writing region close event to WAL at 1732076973430Closed at 1732076973430 2024-11-20T04:29:33,432 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T04:29:33,432 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732076973432"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732076973432"}]},"ts":"1732076973432"} 2024-11-20T04:29:33,435 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
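
The entries above are the server side of table creation: CreateTableProcedure (pid=4) moves through PRE_OPERATION, WRITE_FS_LAYOUT and ADD_TO_META, initializing and closing the new region d9eda11a98fdcfdd972f5f79618c0ab6 along the way and adding one row to hbase:meta. From the client this is a single blocking Admin call; a minimal sketch (connection details omitted, the descriptor kept to just the 'info' family):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .build();
          // Blocks until the master's CreateTableProcedure finishes, which corresponds to the
          // "Checking to see if procedure is done pid=4" polling visible in this log.
          admin.createTable(td);
        }
      }
    }
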
2024-11-20T04:29:33,437 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T04:29:33,437 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732076973437"}]},"ts":"1732076973437"} 2024-11-20T04:29:33,439 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-20T04:29:33,440 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d9eda11a98fdcfdd972f5f79618c0ab6, ASSIGN}] 2024-11-20T04:29:33,441 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d9eda11a98fdcfdd972f5f79618c0ab6, ASSIGN 2024-11-20T04:29:33,443 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d9eda11a98fdcfdd972f5f79618c0ab6, ASSIGN; state=OFFLINE, location=c2a32e16c274,40241,1732076972119; forceNewPlan=false, retain=false 2024-11-20T04:29:33,502 INFO [RS:1;c2a32e16c274:41341 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C41341%2C1732076973295, suffix=, logDir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295, archiveDir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/oldWALs, maxLogs=32 2024-11-20T04:29:33,502 INFO [RS:1;c2a32e16c274:41341 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C41341%2C1732076973295.1732076973502 2024-11-20T04:29:33,509 INFO [RS:1;c2a32e16c274:41341 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 2024-11-20T04:29:33,511 DEBUG [RS:1;c2a32e16c274:41341 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38063:38063),(127.0.0.1/127.0.0.1:41075:41075)] 2024-11-20T04:29:33,594 INFO [c2a32e16c274:42739 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
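
Once registered, RS:1 creates its own WAL through FSHLogProvider with a 256 MB block size, a 128 MB roll size and maxLogs=32, and the master's balancer assigns the new region. Those WAL parameters correspond to standard configuration keys; the sketch below restates the logged values through them, for illustration only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollingConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // FSHLogProvider above corresponds to the "filesystem" WAL provider.
        conf.set("hbase.wal.provider", "filesystem");
        // rollsize = blocksize * multiplier; the log shows 256 MB blocks rolled at 128 MB.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // maxLogs=32: how many WAL files may accumulate before flushes are forced.
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }
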
2024-11-20T04:29:33,594 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d9eda11a98fdcfdd972f5f79618c0ab6, regionState=OPENING, regionLocation=c2a32e16c274,40241,1732076972119 2024-11-20T04:29:33,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d9eda11a98fdcfdd972f5f79618c0ab6, ASSIGN because future has completed 2024-11-20T04:29:33,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d9eda11a98fdcfdd972f5f79618c0ab6, server=c2a32e16c274,40241,1732076972119}] 2024-11-20T04:29:33,755 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. 2024-11-20T04:29:33,756 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d9eda11a98fdcfdd972f5f79618c0ab6, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:29:33,756 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:33,756 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:29:33,756 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:33,756 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:33,758 INFO [StoreOpener-d9eda11a98fdcfdd972f5f79618c0ab6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:33,760 INFO [StoreOpener-d9eda11a98fdcfdd972f5f79618c0ab6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d9eda11a98fdcfdd972f5f79618c0ab6 columnFamilyName info 2024-11-20T04:29:33,760 DEBUG [StoreOpener-d9eda11a98fdcfdd972f5f79618c0ab6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:29:33,760 INFO [StoreOpener-d9eda11a98fdcfdd972f5f79618c0ab6-1 {}] regionserver.HStore(327): Store=d9eda11a98fdcfdd972f5f79618c0ab6/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:29:33,760 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:33,761 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:33,762 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:33,762 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:33,762 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:33,764 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:33,767 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:29:33,767 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d9eda11a98fdcfdd972f5f79618c0ab6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716976, jitterRate=-0.08831876516342163}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T04:29:33,768 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:33,768 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d9eda11a98fdcfdd972f5f79618c0ab6: Running coprocessor pre-open hook at 1732076973757Writing region info on filesystem at 1732076973757Initializing all the Stores at 1732076973758 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732076973758Cleaning up temporary data from old regions at 1732076973762 (+4 ms)Running coprocessor post-open hooks at 1732076973768 (+6 ms)Region opened successfully at 1732076973768 2024-11-20T04:29:33,770 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6., pid=6, masterSystemTime=1732076973751 2024-11-20T04:29:33,772 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. 2024-11-20T04:29:33,772 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. 2024-11-20T04:29:33,773 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d9eda11a98fdcfdd972f5f79618c0ab6, regionState=OPEN, openSeqNum=2, regionLocation=c2a32e16c274,40241,1732076972119 2024-11-20T04:29:33,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d9eda11a98fdcfdd972f5f79618c0ab6, server=c2a32e16c274,40241,1732076972119 because future has completed 2024-11-20T04:29:33,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T04:29:33,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d9eda11a98fdcfdd972f5f79618c0ab6, server=c2a32e16c274,40241,1732076972119 in 185 msec 2024-11-20T04:29:33,788 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T04:29:33,788 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=d9eda11a98fdcfdd972f5f79618c0ab6, ASSIGN in 345 msec 2024-11-20T04:29:33,789 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T04:29:33,790 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732076973789"}]},"ts":"1732076973789"} 2024-11-20T04:29:33,792 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-20T04:29:33,793 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T04:29:33,795 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 389 msec 2024-11-20T04:29:38,669 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T04:29:38,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:29:38,684 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:29:38,686 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:29:38,686 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:29:38,694 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-20T04:29:41,223 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T04:29:41,224 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T04:29:41,225 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-20T04:29:41,225 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-20T04:29:41,226 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T04:29:41,226 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T04:29:41,226 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T04:29:41,226 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T04:29:43,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42739 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T04:29:43,431 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: 
CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-20T04:29:43,431 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-20T04:29:43,434 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-20T04:29:43,434 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. 2024-11-20T04:29:43,447 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:29:43,451 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:29:43,453 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:29:43,453 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:29:43,453 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T04:29:43,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@519de6b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:29:43,455 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@167a7fde{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:29:43,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b5be5aa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/java.io.tmpdir/jetty-localhost-38251-hadoop-hdfs-3_4_1-tests_jar-_-any-2626431916756404628/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:43,574 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f7383da{HTTP/1.1, (http/1.1)}{localhost:38251} 2024-11-20T04:29:43,574 INFO [Time-limited test {}] server.Server(415): Started @117652ms 2024-11-20T04:29:43,575 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T04:29:43,612 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:29:43,616 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:29:43,616 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:29:43,616 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:29:43,616 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:29:43,617 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50ff2063{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:29:43,617 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dc59954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:29:43,683 WARN [Thread-827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5/current/BP-1675406404-172.17.0.2-1732076970975/current, will proceed with Du for space computation calculation, 2024-11-20T04:29:43,683 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6/current/BP-1675406404-172.17.0.2-1732076970975/current, will proceed with Du for space computation calculation, 2024-11-20T04:29:43,707 WARN [Thread-807 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T04:29:43,711 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe64226b173f4034e with lease ID 0x271a803eb366b81e: Processing first storage report for DS-55e2085b-2e9c-4ffc-84df-259f8c67b689 from datanode DatanodeRegistration(127.0.0.1:39709, datanodeUuid=daca8ad2-2090-44e3-8eba-acb1eefa44a3, infoPort=37513, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975) 2024-11-20T04:29:43,712 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe64226b173f4034e with lease ID 0x271a803eb366b81e: from storage DS-55e2085b-2e9c-4ffc-84df-259f8c67b689 node DatanodeRegistration(127.0.0.1:39709, datanodeUuid=daca8ad2-2090-44e3-8eba-acb1eefa44a3, infoPort=37513, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T04:29:43,712 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe64226b173f4034e with lease ID 0x271a803eb366b81e: Processing first storage report for DS-7d7241aa-b79e-4b82-885f-31433659510b from datanode DatanodeRegistration(127.0.0.1:39709, datanodeUuid=daca8ad2-2090-44e3-8eba-acb1eefa44a3, infoPort=37513, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975) 2024-11-20T04:29:43,712 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe64226b173f4034e with lease ID 0x271a803eb366b81e: from storage DS-7d7241aa-b79e-4b82-885f-31433659510b node DatanodeRegistration(127.0.0.1:39709, datanodeUuid=daca8ad2-2090-44e3-8eba-acb1eefa44a3, infoPort=37513, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:29:43,744 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30add41a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/java.io.tmpdir/jetty-localhost-38409-hadoop-hdfs-3_4_1-tests_jar-_-any-288801977365567049/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:43,745 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fd17220{HTTP/1.1, (http/1.1)}{localhost:38409} 2024-11-20T04:29:43,745 INFO [Time-limited test {}] server.Server(415): Started @117824ms 2024-11-20T04:29:43,746 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T04:29:43,788 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:29:43,792 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:29:43,797 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:29:43,797 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:29:43,797 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T04:29:43,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@492554e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:29:43,800 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e1eaefc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:29:43,856 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data7/current/BP-1675406404-172.17.0.2-1732076970975/current, will proceed with Du for space computation calculation, 2024-11-20T04:29:43,856 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data8/current/BP-1675406404-172.17.0.2-1732076970975/current, will proceed with Du for space computation calculation, 2024-11-20T04:29:43,879 WARN [Thread-842 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T04:29:43,881 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e5444c778dde26 with lease ID 0x271a803eb366b81f: Processing first storage report for DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5 from datanode DatanodeRegistration(127.0.0.1:46105, datanodeUuid=22f86c90-eb64-4926-8df8-0fd8a7dec69b, infoPort=43277, infoSecurePort=0, ipcPort=32897, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975) 2024-11-20T04:29:43,881 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e5444c778dde26 with lease ID 0x271a803eb366b81f: from storage DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5 node DatanodeRegistration(127.0.0.1:46105, datanodeUuid=22f86c90-eb64-4926-8df8-0fd8a7dec69b, infoPort=43277, infoSecurePort=0, ipcPort=32897, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:29:43,882 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e5444c778dde26 with lease ID 0x271a803eb366b81f: Processing first storage report for DS-3183366d-d83c-4d7b-9fda-12a959ac2bce from datanode DatanodeRegistration(127.0.0.1:46105, datanodeUuid=22f86c90-eb64-4926-8df8-0fd8a7dec69b, infoPort=43277, infoSecurePort=0, ipcPort=32897, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975) 2024-11-20T04:29:43,882 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e5444c778dde26 with lease ID 0x271a803eb366b81f: from storage DS-3183366d-d83c-4d7b-9fda-12a959ac2bce node DatanodeRegistration(127.0.0.1:46105, datanodeUuid=22f86c90-eb64-4926-8df8-0fd8a7dec69b, infoPort=43277, infoSecurePort=0, ipcPort=32897, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:29:43,923 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ef6496b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/java.io.tmpdir/jetty-localhost-38627-hadoop-hdfs-3_4_1-tests_jar-_-any-2646851648019074257/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:43,923 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d82aaea{HTTP/1.1, (http/1.1)}{localhost:38627} 2024-11-20T04:29:43,923 INFO [Time-limited test {}] server.Server(415): Started @118002ms 2024-11-20T04:29:43,925 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
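
[Editorial sketch] The records above (new Jetty "datanode" web contexts, ServerConnector startups, and BlockManager "Processing first storage report" messages) correspond to extra datanodes joining the mini DFS cluster used by this test. As a rough, hypothetical illustration of how a test can add datanodes through the public MiniDFSCluster API (class and method names below are illustrative of the API, not copied from this test's source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical sketch, not the test's literal code: add datanodes to a running
// MiniDFSCluster. Each new node starts its own web UI and sends a first block
// report, which is what the "processReport ... first storage report" records
// above show on the namenode side.
final class AddDataNodesSketch {
  static void addDataNodes(MiniDFSCluster cluster, Configuration conf, int count) throws Exception {
    cluster.startDataNodes(conf, count, true, null, null); // manageDfsDirs=true, default racks
    cluster.waitActive();                                  // block until the new nodes register
  }
}
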
2024-11-20T04:29:44,026 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data9/current/BP-1675406404-172.17.0.2-1732076970975/current, will proceed with Du for space computation calculation, 2024-11-20T04:29:44,026 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data10/current/BP-1675406404-172.17.0.2-1732076970975/current, will proceed with Du for space computation calculation, 2024-11-20T04:29:44,049 WARN [Thread-877 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T04:29:44,052 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x486d00dc36b42090 with lease ID 0x271a803eb366b820: Processing first storage report for DS-ae1bd8d8-75ee-476b-bf05-2b5216819790 from datanode DatanodeRegistration(127.0.0.1:39517, datanodeUuid=fbe6ea60-46f1-4d16-888d-ceb4c1425ec0, infoPort=37391, infoSecurePort=0, ipcPort=38295, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975) 2024-11-20T04:29:44,052 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x486d00dc36b42090 with lease ID 0x271a803eb366b820: from storage DS-ae1bd8d8-75ee-476b-bf05-2b5216819790 node DatanodeRegistration(127.0.0.1:39517, datanodeUuid=fbe6ea60-46f1-4d16-888d-ceb4c1425ec0, infoPort=37391, infoSecurePort=0, ipcPort=38295, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:29:44,052 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x486d00dc36b42090 with lease ID 0x271a803eb366b820: Processing first storage report for DS-18ad4175-5e04-4208-bc2e-2f0fb6f8bc29 from datanode DatanodeRegistration(127.0.0.1:39517, datanodeUuid=fbe6ea60-46f1-4d16-888d-ceb4c1425ec0, infoPort=37391, infoSecurePort=0, ipcPort=38295, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975) 2024-11-20T04:29:44,052 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x486d00dc36b42090 with lease ID 0x271a803eb366b820: from storage DS-18ad4175-5e04-4208-bc2e-2f0fb6f8bc29 node DatanodeRegistration(127.0.0.1:39517, datanodeUuid=fbe6ea60-46f1-4d16-888d-ceb4c1425ec0, infoPort=37391, infoSecurePort=0, ipcPort=38295, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:29:44,147 WARN [ResponseProcessor for block BP-1675406404-172.17.0.2-1732076970975:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1675406404-172.17.0.2-1732076970975:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:44,147 WARN [ResponseProcessor for block BP-1675406404-172.17.0.2-1732076970975:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1675406404-172.17.0.2-1732076970975:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:44,148 WARN [ResponseProcessor for block BP-1675406404-172.17.0.2-1732076970975:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1675406404-172.17.0.2-1732076970975:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1675406404-172.17.0.2-1732076970975:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:44,147 WARN [ResponseProcessor for block BP-1675406404-172.17.0.2-1732076970975:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1675406404-172.17.0.2-1732076970975:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:44,148 WARN [DataStreamer for file /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/WALs/c2a32e16c274,42739,1732076972014/c2a32e16c274%2C42739%2C1732076972014.1732076972283 block BP-1675406404-172.17.0.2-1732076970975:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK], DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]) is bad. 2024-11-20T04:29:44,148 WARN [DataStreamer for file /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 block BP-1675406404-172.17.0.2-1732076970975:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK], DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]) is bad. 
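
[Editorial sketch] The EOFException in the DFSClient ResponseProcessor threads and the "Error Recovery ... datanode 0(...) is bad" messages above are the client-side symptoms of a datanode disappearing while WAL blocks are still open for write. A hypothetical sketch of the fault injection that produces this with the MiniDFSCluster test API (the datanode index is illustrative):

import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical sketch: stop one datanode of the mini cluster. Every open write
// pipeline that includes this node loses its ack stream, and the DFS client
// marks the node as bad and attempts pipeline recovery, as logged above.
final class StopDataNodeSketch {
  static void stopOne(MiniDFSCluster cluster) throws Exception {
    cluster.stopDataNode(0); // the returned handle could be kept to restart the node later
  }
}
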
2024-11-20T04:29:44,148 WARN [DataStreamer for file /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 block BP-1675406404-172.17.0.2-1732076970975:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK], DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]) is bad. 2024-11-20T04:29:44,148 WARN [PacketResponder: BP-1675406404-172.17.0.2-1732076970975:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45707] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:44,149 WARN [DataStreamer for file /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta block BP-1675406404-172.17.0.2-1732076970975:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK], DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]) is bad. 
2024-11-20T04:29:44,149 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1710338394_22 at /127.0.0.1:58024 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:36545:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58024 dst: /127.0.0.1:36545 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:44,149 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:46610 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45707:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46610 dst: /127.0.0.1:45707 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:44,149 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2036382949_22 at /127.0.0.1:46592 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45707:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46592 dst: /127.0.0.1:45707 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:29:44,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:57990 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36545:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57990 dst: /127.0.0.1:36545 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:44,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:46618 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45707:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46618 dst: /127.0.0.1:45707 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:44,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2036382949_22 at /127.0.0.1:57970 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36545:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57970 dst: /127.0.0.1:36545 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:44,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:58000 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36545:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58000 dst: /127.0.0.1:36545 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:44,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1710338394_22 at /127.0.0.1:46644 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:45707:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46644 dst: /127.0.0.1:45707 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:29:44,152 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21a23b22{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:44,152 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21f2acf7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:29:44,153 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:29:44,153 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c80aceb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:29:44,153 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4549eece{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir/,STOPPED} 2024-11-20T04:29:44,155 WARN [BP-1675406404-172.17.0.2-1732076970975 heartbeating to localhost/127.0.0.1:40051 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:29:44,155 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T04:29:44,155 WARN [BP-1675406404-172.17.0.2-1732076970975 heartbeating to localhost/127.0.0.1:40051 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1675406404-172.17.0.2-1732076970975 (Datanode Uuid 83959f4a-849d-48e1-94ea-6cb816411dc3) service to localhost/127.0.0.1:40051 2024-11-20T04:29:44,155 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:29:44,155 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data3/current/BP-1675406404-172.17.0.2-1732076970975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:44,156 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data4/current/BP-1675406404-172.17.0.2-1732076970975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:44,156 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:29:44,156 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@5ddde9d0 {}] datanode.DataXceiver(331): 127.0.0.1:36545:DataXceiver error processing unknown operation src: /127.0.0.1:49566 dst: /127.0.0.1:36545 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:29:44,157 WARN [DataStreamer for file /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta block BP-1675406404-172.17.0.2-1732076970975:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:44,156 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@1cd086eb {}] datanode.DataXceiver(331): 127.0.0.1:36545:DataXceiver error processing unknown operation src: /127.0.0.1:49568 dst: /127.0.0.1:36545 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:44,157 WARN [DataStreamer for file /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 block BP-1675406404-172.17.0.2-1732076970975:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:44,157 WARN [DataStreamer for file /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 block BP-1675406404-172.17.0.2-1732076970975:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:44,158 WARN [DataStreamer for file /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/WALs/c2a32e16c274,42739,1732076972014/c2a32e16c274%2C42739%2C1732076972014.1732076972283 block BP-1675406404-172.17.0.2-1732076970975:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] 
at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:44,165 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3150e6db{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:44,165 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@539e83f9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:29:44,165 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:29:44,166 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@259c861e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:29:44,166 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5917cb43{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir/,STOPPED} 2024-11-20T04:29:44,168 WARN [BP-1675406404-172.17.0.2-1732076970975 heartbeating to localhost/127.0.0.1:40051 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:29:44,168 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
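
[Editorial sketch] The "Exception in createBlockOutputStream ... Connection reset" records a little further up come from the DFS client rebuilding the write pipelines of the open WAL files after the datanode loss. How aggressively the client replaces failed datanodes during this recovery is controlled by the standard dfs.client.block.write.replace-datanode-on-failure.* properties; a hypothetical sketch of setting them on the client Configuration (the values shown are illustrative, not what this test configures):

import org.apache.hadoop.conf.Configuration;

// Hypothetical sketch: client-side HDFS settings that govern datanode replacement
// during write-pipeline recovery. Property names are standard hdfs-client keys;
// the chosen values are illustrative only.
final class PipelineRecoveryConfSketch {
  static Configuration tune(Configuration conf) {
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "ALWAYS");
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
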
2024-11-20T04:29:44,168 WARN [BP-1675406404-172.17.0.2-1732076970975 heartbeating to localhost/127.0.0.1:40051 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1675406404-172.17.0.2-1732076970975 (Datanode Uuid a43b2b1a-5865-4441-b03b-2a8b632bbbd0) service to localhost/127.0.0.1:40051 2024-11-20T04:29:44,168 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:29:44,168 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data1/current/BP-1675406404-172.17.0.2-1732076970975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:44,169 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data2/current/BP-1675406404-172.17.0.2-1732076970975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:44,169 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:29:44,173 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6., hostname=c2a32e16c274,40241,1732076972119, seqNum=2] 2024-11-20T04:29:44,177 ERROR [FSHLog-0-hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094-prefix:c2a32e16c274,40241,1732076972119 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:44,177 WARN [FSHLog-0-hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094-prefix:c2a32e16c274,40241,1732076972119 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:44,178 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:44,178 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C40241%2C1732076972119:(num 1732076972690) roll requested 2024-11-20T04:29:44,178 INFO [regionserver/c2a32e16c274:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C40241%2C1732076972119.1732076984178 2024-11-20T04:29:44,182 WARN [Thread-901 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:44,182 WARN [Thread-901 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK], DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]) is bad. 
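The repeated "Error Recovery ... datanode N is bad" and "All datanodes ... are bad. Aborting..." entries above come from the DFS client's write-pipeline recovery. How aggressively the client replaces failed datanodes instead of aborting is controlled by the standard replace-datanode-on-failure client settings; the snippet below is only a hedged illustration of those knobs, with values chosen for the example rather than read from this test's configuration.

import org.apache.hadoop.conf.Configuration;

public class PipelineRecoveryConfSketch {
  public static Configuration clientConf() {
    Configuration conf = new Configuration();
    // Ask the client to try replacing a failed datanode in the write pipeline.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT only replaces when the pipeline/file is large enough; ALWAYS and NEVER are the other policies.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Keep writing with fewer replicas instead of failing hard when no replacement is available.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}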
2024-11-20T04:29:44,182 WARN [Thread-901 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741838_1018 2024-11-20T04:29:44,184 WARN [Thread-901 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK] 2024-11-20T04:29:44,194 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:44,194 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:44,194 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:44,194 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:44,194 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:44,194 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076984178 2024-11-20T04:29:44,195 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:44,195 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:29:44,196 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-11-20T04:29:44,197 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-11-20T04:29:44,197 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43277:43277),(127.0.0.1/127.0.0.1:37391:37391)]
2024-11-20T04:29:44,197 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690
2024-11-20T04:29:44,197 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 is not closed yet, will try archiving it next time
2024-11-20T04:29:44,200 WARN [IPC Server handler 3 on default port 40051 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009
2024-11-20T04:29:44,203 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 after 5ms
2024-11-20T04:29:44,250 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T04:29:45,359 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
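The Close-WAL-Writer thread above initializes RecoverLeaseFSUtils and starts recovering the lease on the old WAL file, logging "Failed to recover lease, attempt=0 ... after 5ms" before retrying later. The sketch below shows the same recover-and-poll pattern directly against a DistributedFileSystem; the dfs and walPath handles and the one-second retry interval are assumptions for illustration, not HBase's actual retry schedule.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  /** Repeatedly ask the NameNode to recover the lease until the file is closed. */
  public static void recover(DistributedFileSystem dfs, Path walPath) throws Exception {
    boolean recovered = dfs.recoverLease(walPath);
    int attempt = 0;
    while (!recovered && !dfs.isFileClosed(walPath)) {
      Thread.sleep(1000L); // retry interval chosen for the sketch only
      recovered = dfs.recoverLease(walPath);
      attempt++;
    }
    System.out.println("lease recovered after attempt=" + attempt);
  }
}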
2024-11-20T04:29:46,197 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:46,198 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076984178 2024-11-20T04:29:46,199 WARN [ResponseProcessor for block BP-1675406404-172.17.0.2-1732076970975:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1675406404-172.17.0.2-1732076970975:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:46,199 WARN [DataStreamer for file /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076984178 block BP-1675406404-172.17.0.2-1732076970975:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK], DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 2024-11-20T04:29:46,200 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:49780 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:46105:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49780 dst: /127.0.0.1:46105 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:46,200 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:45302 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:39517:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45302 dst: /127.0.0.1:39517 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
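The DataXceiver errors above ("Premature EOF from inputStream", ClosedChannelException) are the datanode side of the WAL writer's stream going away mid-block. At the HDFS level the WAL writer is an output stream whose edits are pushed with hflush/hsync; the bare-bones, hypothetical append-then-hflush loop below is only meant to show where a dead pipeline surfaces to a writer, and the path and payload are made up, not HBase's actual writer code.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HflushWriterSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataOutputStream out = fs.create(new Path("/tmp/wal-like-stream"), true)) {
      for (int i = 0; i < 10; i++) {
        out.write(("edit-" + i + "\n").getBytes(StandardCharsets.UTF_8));
        // hflush pushes the bytes to every datanode in the pipeline; a dead node
        // shows up here as the pipeline errors seen throughout this log.
        out.hflush();
      }
    }
  }
}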
2024-11-20T04:29:46,204 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30add41a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:46,204 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fd17220{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:29:46,204 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:29:46,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dc59954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:29:46,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50ff2063{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir/,STOPPED} 2024-11-20T04:29:46,206 WARN [BP-1675406404-172.17.0.2-1732076970975 heartbeating to localhost/127.0.0.1:40051 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:29:46,206 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T04:29:46,206 WARN [BP-1675406404-172.17.0.2-1732076970975 heartbeating to localhost/127.0.0.1:40051 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1675406404-172.17.0.2-1732076970975 (Datanode Uuid 22f86c90-eb64-4926-8df8-0fd8a7dec69b) service to localhost/127.0.0.1:40051 2024-11-20T04:29:46,206 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:29:46,207 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data7/current/BP-1675406404-172.17.0.2-1732076970975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:46,207 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data8/current/BP-1675406404-172.17.0.2-1732076970975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:46,207 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:29:46,251 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:47,359 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:48,197 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:48,198 WARN [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]] 2024-11-20T04:29:48,198 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C40241%2C1732076972119:(num 1732076984178) roll requested 2024-11-20T04:29:48,198 INFO [regionserver/c2a32e16c274:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C40241%2C1732076972119.1732076988198 2024-11-20T04:29:48,201 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:48,202 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK], DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]) is bad. 2024-11-20T04:29:48,202 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741840_1022 2024-11-20T04:29:48,202 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK] 2024-11-20T04:29:48,204 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 after 4007ms 2024-11-20T04:29:48,206 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:48,206 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:48,206 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:48,207 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:48,207 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:48,207 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076984178 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076988198 2024-11-20T04:29:48,208 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37513:37513),(127.0.0.1/127.0.0.1:37391:37391)] 2024-11-20T04:29:48,208 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 is not closed yet, will try archiving it next time 2024-11-20T04:29:48,208 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076984178 is not closed yet, will try archiving it next time 2024-11-20T04:29:48,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39517 is added to blk_1073741839_1021 (size=2431) 2024-11-20T04:29:48,209 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 is not closed yet, will try archiving it next time 2024-11-20T04:29:48,213 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to 
[10,000] milli-secs(wait.for.ratio=[1]) 2024-11-20T04:29:48,251 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:49,360 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:50,066 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@50b67a8f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39517, datanodeUuid=fbe6ea60-46f1-4d16-888d-ceb4c1425ec0, infoPort=37391, infoSecurePort=0, ipcPort=38295, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975):Failed to transfer BP-1675406404-172.17.0.2-1732076970975:blk_1073741839_1021 to 127.0.0.1:45707 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:50,208 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:50,216 WARN [ResponseProcessor for block BP-1675406404-172.17.0.2-1732076970975:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1675406404-172.17.0.2-1732076970975:blk_1073741841_1023 java.io.IOException: Bad response ERROR for BP-1675406404-172.17.0.2-1732076970975:blk_1073741841_1023 from datanode DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:50,217 WARN [DataStreamer for file /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076988198 block BP-1675406404-172.17.0.2-1732076970975:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK], DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]) is bad. 2024-11-20T04:29:50,217 WARN [PacketResponder: BP-1675406404-172.17.0.2-1732076970975:blk_1073741841_1023, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39517] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
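The roll a few entries above was requested because FSHLog saw "Found 1 replicas but expecting no less than 2 replicas" on the current pipeline. That low-replication roll behavior is tunable; the snippet below is a hedged sketch of the two knobs as I recall them from FSHLog, and the key names and values should be verified against the HBase version in use rather than treated as authoritative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class LowReplicationRollConfSketch {
  public static Configuration conf() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum pipeline size the WAL tolerates before asking for a roll
    // (believed to default to dfs.replication when unset).
    conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
    // How many consecutive low-replication syncs are allowed before a roll is forced.
    conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
    return conf;
  }
}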
2024-11-20T04:29:50,218 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:59370 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:39709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59370 dst: /127.0.0.1:39709 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:50,218 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:45322 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:39517:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45322 dst: /127.0.0.1:39517 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:50,220 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ef6496b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:50,221 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d82aaea{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:29:50,221 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:29:50,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e1eaefc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:29:50,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@492554e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir/,STOPPED} 2024-11-20T04:29:50,223 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T04:29:50,223 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:29:50,224 WARN [BP-1675406404-172.17.0.2-1732076970975 heartbeating to localhost/127.0.0.1:40051 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:29:50,224 WARN [BP-1675406404-172.17.0.2-1732076970975 heartbeating to localhost/127.0.0.1:40051 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1675406404-172.17.0.2-1732076970975 (Datanode Uuid fbe6ea60-46f1-4d16-888d-ceb4c1425ec0) service to localhost/127.0.0.1:40051 2024-11-20T04:29:50,225 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data9/current/BP-1675406404-172.17.0.2-1732076970975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:50,225 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data10/current/BP-1675406404-172.17.0.2-1732076970975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:29:50,225 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:29:50,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40241 {}] regionserver.HRegion(8855): Flush requested on d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:50,236 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d9eda11a98fdcfdd972f5f79618c0ab6 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T04:29:50,251 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:50,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/c13f3d2d58684aa69624fee242901e97 is 1080, key is row0002/info:/1732076986209/Put/seqid=0 2024-11-20T04:29:50,262 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39517 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:50,261 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:59388 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741842_1025] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6]'}, localName='127.0.0.1:39709', datanodeUuid='daca8ad2-2090-44e3-8eba-acb1eefa44a3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741842_1025 to mirror 127.0.0.1:39517 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:50,262 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK], DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]) is bad. 2024-11-20T04:29:50,262 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741842_1025 2024-11-20T04:29:50,262 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:59388 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741842_1025] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
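The "Flush requested on d9eda11a98fdcfdd972f5f79618c0ab6" entries above are memstore-pressure flushes issued inside the region server. The same flush can be requested explicitly through the client Admin API, as in the small sketch below; the connection details are assumed to come from the surrounding test configuration, which is not shown in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush all regions of the test table, producing an HFile like the one committed above.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}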
2024-11-20T04:29:50,262 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:59388 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:39709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59388 dst: /127.0.0.1:39709 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:50,262 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK] 2024-11-20T04:29:50,265 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36545 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:50,265 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:59392 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6]'}, localName='127.0.0.1:39709', datanodeUuid='daca8ad2-2090-44e3-8eba-acb1eefa44a3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741843_1026 to mirror 127.0.0.1:36545 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:50,265 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK], DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]) is bad. 2024-11-20T04:29:50,265 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741843_1026 2024-11-20T04:29:50,265 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:59392 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T04:29:50,265 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:59392 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:39709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59392 dst: /127.0.0.1:39709 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:29:50,269 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK] 2024-11-20T04:29:50,271 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45707 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:50,271 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:59400 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6]'}, localName='127.0.0.1:39709', datanodeUuid='daca8ad2-2090-44e3-8eba-acb1eefa44a3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741844_1027 to mirror 127.0.0.1:45707 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:50,271 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK], DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]) is bad. 
2024-11-20T04:29:50,271 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741844_1027 2024-11-20T04:29:50,271 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:59400 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T04:29:50,271 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:59400 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:39709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59400 dst: /127.0.0.1:39709 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:50,272 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK] 2024-11-20T04:29:50,273 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:50,273 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 
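By this point the client has abandoned several blocks and excluded most of the datanodes it tried, and the NameNode entries just below cannot place enough replicas for a new block. A quick way to see how many datanodes the NameNode still considers live is the datanode report, sketched below; the dfs handle is an assumption standing in for whatever DistributedFileSystem the test holds.

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public final class LiveDatanodeReportSketch {
  public static int liveDatanodes(DistributedFileSystem dfs) throws Exception {
    // Ask the NameNode which datanodes it currently considers live.
    DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
    for (DatanodeInfo dn : live) {
      System.out.println("live: " + dn.getXferAddr());
    }
    return live.length;
  }
}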
2024-11-20T04:29:50,273 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741845_1028 2024-11-20T04:29:50,274 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK] 2024-11-20T04:29:50,275 WARN [IPC Server handler 2 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T04:29:50,275 WARN [IPC Server handler 2 on default port 40051 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T04:29:50,275 WARN [IPC Server handler 2 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T04:29:50,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741846_1029 (size=10347) 2024-11-20T04:29:50,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/c13f3d2d58684aa69624fee242901e97 2024-11-20T04:29:50,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/c13f3d2d58684aa69624fee242901e97 as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/c13f3d2d58684aa69624fee242901e97 2024-11-20T04:29:50,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/c13f3d2d58684aa69624fee242901e97, entries=5, sequenceid=11, filesize=10.1 K 2024-11-20T04:29:50,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for d9eda11a98fdcfdd972f5f79618c0ab6 in 458ms, sequenceid=11, compaction requested=false 2024-11-20T04:29:50,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
d9eda11a98fdcfdd972f5f79618c0ab6: 2024-11-20T04:29:50,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40241 {}] regionserver.HRegion(8855): Flush requested on d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:50,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d9eda11a98fdcfdd972f5f79618c0ab6 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-20T04:29:50,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/1fdf186d3a9b46438c1ea18e8ae24f30 is 1080, key is row0007/info:/1732076990237/Put/seqid=0 2024-11-20T04:29:50,874 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:50,874 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK], DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]) is bad. 2024-11-20T04:29:50,874 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741847_1030 2024-11-20T04:29:50,874 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK] 2024-11-20T04:29:50,875 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:50,875 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]) is bad. 2024-11-20T04:29:50,875 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741848_1031 2024-11-20T04:29:50,876 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK] 2024-11-20T04:29:50,878 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46105 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:50,878 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK], DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 2024-11-20T04:29:50,878 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32840 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6]'}, localName='127.0.0.1:39709', datanodeUuid='daca8ad2-2090-44e3-8eba-acb1eefa44a3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741849_1032 to mirror 127.0.0.1:46105 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:50,878 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741849_1032 2024-11-20T04:29:50,878 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32840 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T04:29:50,878 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32840 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:39709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32840 dst: /127.0.0.1:39709 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:50,879 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK] 2024-11-20T04:29:50,880 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
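The IPC Server handler warnings above ("Failed to place enough replicas, still in need of 1 to reach 2", "... only 0 storage types can be selected") are the NameNode-side view of the same situation: one replica already sits on the surviving datanode, and every other DISK location has been excluded. A rough sketch of that accounting follows, with assumed names (ToyReplicaPlacement, place) and a made-up method shape rather than the real BlockPlacementPolicyDefault.

import java.util.*;

// Assumed, simplified model of the replica-placement shortfall reported above.
public class ToyReplicaPlacement {
    static void place(int replication, Set<String> alreadyChosen,
                      List<String> diskNodes, Set<String> excluded) {
        int stillNeeded = replication - alreadyChosen.size();
        // A node can host the extra replica only if it is neither excluded
        // nor already holding one.
        long usable = diskNodes.stream()
                .filter(n -> !excluded.contains(n) && !alreadyChosen.contains(n))
                .count();
        if (usable < stillNeeded) {
            System.out.printf(
                    "Failed to place enough replicas, still in need of %d to reach %d "
                            + "(usable DISK nodes: %d)%n",
                    stillNeeded, replication, usable);
        } else {
            System.out.println("placement possible on " + usable + " DISK nodes");
        }
    }

    public static void main(String[] args) {
        List<String> disks = List.of("127.0.0.1:39709", "127.0.0.1:45707",
                "127.0.0.1:46105", "127.0.0.1:36545");
        // One replica already landed on the surviving node; the other three are excluded.
        place(2, Set.of("127.0.0.1:39709"), disks,
                Set.of("127.0.0.1:45707", "127.0.0.1:46105", "127.0.0.1:36545"));
    }
}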
2024-11-20T04:29:50,880 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]) is bad. 2024-11-20T04:29:50,880 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741850_1033 2024-11-20T04:29:50,880 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK] 2024-11-20T04:29:50,881 WARN [IPC Server handler 4 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T04:29:50,881 WARN [IPC Server handler 4 on default port 40051 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T04:29:50,881 WARN [IPC Server handler 4 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T04:29:50,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741851_1034 (size=12506) 2024-11-20T04:29:51,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/1fdf186d3a9b46438c1ea18e8ae24f30 2024-11-20T04:29:51,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/1fdf186d3a9b46438c1ea18e8ae24f30 as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/1fdf186d3a9b46438c1ea18e8ae24f30 2024-11-20T04:29:51,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/1fdf186d3a9b46438c1ea18e8ae24f30, entries=7, sequenceid=24, filesize=12.2 K 2024-11-20T04:29:51,298 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for d9eda11a98fdcfdd972f5f79618c0ab6 in 431ms, sequenceid=24, compaction requested=false 2024-11-20T04:29:51,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d9eda11a98fdcfdd972f5f79618c0ab6: 2024-11-20T04:29:51,298 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-20T04:29:51,298 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:51,299 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/1fdf186d3a9b46438c1ea18e8ae24f30 because midkey is the same as first or last row 2024-11-20T04:29:51,360 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:52,208 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:52,209 WARN [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]] 2024-11-20T04:29:52,209 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C40241%2C1732076972119:(num 1732076988198) roll requested 2024-11-20T04:29:52,209 INFO [regionserver/c2a32e16c274:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C40241%2C1732076972119.1732076992209 2024-11-20T04:29:52,212 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:52,212 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK], DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]) is bad. 2024-11-20T04:29:52,212 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741852_1035 2024-11-20T04:29:52,213 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK] 2024-11-20T04:29:52,214 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
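The FSHLog warning at the start of this stretch ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL.") is a threshold check on the current write pipeline. Here is a minimal sketch of that kind of check, under assumed names (ToyWalReplicationMonitor, shouldRequestRoll); it is not the actual HBase FSHLog code.

// Hypothetical sketch of the low-replication check reported by FSHLog above:
// if the write pipeline has fewer replicas than the configured minimum,
// ask for the WAL to be closed and rolled.
public class ToyWalReplicationMonitor {
    private final int minReplicas;

    ToyWalReplicationMonitor(int minReplicas) {
        this.minReplicas = minReplicas;
    }

    /** Returns true when a roll should be requested for the current pipeline. */
    boolean shouldRequestRoll(String[] currentPipeline) {
        if (currentPipeline.length < minReplicas) {
            System.out.printf(
                    "HDFS pipeline error detected. Found %d replicas but expecting no less than "
                            + "%d replicas. Requesting close of WAL.%n",
                    currentPipeline.length, minReplicas);
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        ToyWalReplicationMonitor monitor = new ToyWalReplicationMonitor(2);
        // Only one datanode is left in the pipeline, as in the log above.
        String[] pipeline = { "127.0.0.1:39709" };
        System.out.println("roll requested=" + monitor.shouldRequestRoll(pipeline));
    }
}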
2024-11-20T04:29:52,214 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]) is bad. 2024-11-20T04:29:52,214 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741853_1036 2024-11-20T04:29:52,215 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK] 2024-11-20T04:29:52,217 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46105 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:52,217 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32854 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6]'}, localName='127.0.0.1:39709', datanodeUuid='daca8ad2-2090-44e3-8eba-acb1eefa44a3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741854_1037 to mirror 127.0.0.1:46105 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:29:52,217 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK], DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 2024-11-20T04:29:52,217 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741854_1037 2024-11-20T04:29:52,217 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32854 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T04:29:52,218 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32854 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:39709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32854 dst: /127.0.0.1:39709 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:52,218 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK] 2024-11-20T04:29:52,219 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:29:52,219 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]) is bad. 2024-11-20T04:29:52,219 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741855_1038 2024-11-20T04:29:52,220 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK] 2024-11-20T04:29:52,220 WARN [IPC Server handler 3 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T04:29:52,220 WARN [IPC Server handler 3 on default port 40051 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T04:29:52,220 WARN [IPC Server handler 3 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T04:29:52,223 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:52,223 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:52,223 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:52,223 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:52,223 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:52,223 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076988198 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076992209 2024-11-20T04:29:52,224 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37513:37513)] 2024-11-20T04:29:52,224 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 is not closed yet, will try archiving it next time 
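The roll just above follows a simple bookkeeping pattern: a new WAL file named <prefix>.<timestamp> is created, and older files that could not be closed yet are remembered and archived on a later attempt ("is not closed yet, will try archiving it next time"). A hypothetical sketch of that bookkeeping, not the real AbstractFSWAL:

import java.util.ArrayDeque;
import java.util.Deque;

// Assumed sketch of the roll/archive bookkeeping in the records above;
// not the actual AbstractFSWAL implementation.
public class ToyWalRoller {
    private final Deque<String> notYetClosed = new ArrayDeque<>();
    private String currentWal;

    String roll(String prefix, long rollTimeMillis) {
        if (currentWal != null) {
            // The previous writer may not close immediately; remember it and
            // "try archiving it next time", as the log puts it.
            notYetClosed.add(currentWal);
            System.out.println(currentWal + " is not closed yet, will try archiving it next time");
        }
        currentWal = prefix + "." + rollTimeMillis;
        System.out.println("Rolled WAL, new WAL " + currentWal);
        return currentWal;
    }

    public static void main(String[] args) {
        ToyWalRoller roller = new ToyWalRoller();
        roller.roll("c2a32e16c274%2C40241%2C1732076972119", 1732076988198L);
        roller.roll("c2a32e16c274%2C40241%2C1732076972119", 1732076992209L);
    }
}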
2024-11-20T04:29:52,224 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076988198 is not closed yet, will try archiving it next time 2024-11-20T04:29:52,225 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076984178 to hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/oldWALs/c2a32e16c274%2C40241%2C1732076972119.1732076984178 2024-11-20T04:29:52,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741841_1024 (size=25992) 2024-11-20T04:29:52,252 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:52,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40241 {}] regionserver.HRegion(8855): Flush requested on d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:52,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d9eda11a98fdcfdd972f5f79618c0ab6 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-20T04:29:52,291 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/b57c1ccb08944dd3bc623f04b1c93230 is 1079, key is tmprow/info:/1732076992285/Put/seqid=0 2024-11-20T04:29:52,293 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
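Each memstore flush in this log writes its HFile under the region's .tmp directory and then commits it by renaming it into the column-family directory (the "Committing .../.tmp/info/<file> as .../info/<file>" records, logged by HRegionFileSystem). The sketch below reproduces that write-then-rename pattern on a local filesystem with java.nio.file; the directory layout and file name are borrowed from the log purely for illustration, while the real commit happens on HDFS.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Local-filesystem sketch of the flush commit pattern seen above; illustrates
// the write-to-.tmp-then-rename idea only.
public class ToyFlushCommit {
    public static void main(String[] args) throws IOException {
        Path region = Files.createTempDirectory("toy-region");
        Path tmpDir = Files.createDirectories(region.resolve(".tmp").resolve("info"));
        Path infoDir = Files.createDirectories(region.resolve("info"));

        // "Flushed memstore data ... to=.../.tmp/info/<file>"
        Path tmpFile = Files.writeString(
                tmpDir.resolve("b57c1ccb08944dd3bc623f04b1c93230"), "flushed cells");

        // "Committing .../.tmp/info/<file> as .../info/<file>"
        Path committed = Files.move(tmpFile, infoDir.resolve(tmpFile.getFileName()),
                StandardCopyOption.ATOMIC_MOVE);
        System.out.println("Added " + committed + ", filesize=" + Files.size(committed) + " bytes");
    }
}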
2024-11-20T04:29:52,293 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]) is bad. 2024-11-20T04:29:52,293 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741857_1040 2024-11-20T04:29:52,294 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK] 2024-11-20T04:29:52,295 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:52,295 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 2024-11-20T04:29:52,295 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741858_1041 2024-11-20T04:29:52,295 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK] 2024-11-20T04:29:52,297 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39517 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:29:52,297 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32878 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6]'}, localName='127.0.0.1:39709', datanodeUuid='daca8ad2-2090-44e3-8eba-acb1eefa44a3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741859_1042 to mirror 127.0.0.1:39517 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:52,297 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK], DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]) is bad. 2024-11-20T04:29:52,298 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741859_1042 2024-11-20T04:29:52,298 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32878 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T04:29:52,298 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32878 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:39709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32878 dst: /127.0.0.1:39709 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:52,298 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK] 2024-11-20T04:29:52,299 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:52,299 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]) is bad. 
2024-11-20T04:29:52,299 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741860_1043 2024-11-20T04:29:52,300 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK] 2024-11-20T04:29:52,300 WARN [IPC Server handler 1 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T04:29:52,300 WARN [IPC Server handler 1 on default port 40051 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T04:29:52,300 WARN [IPC Server handler 1 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T04:29:52,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741861_1044 (size=6027) 2024-11-20T04:29:52,626 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 is not closed yet, will try archiving it next time 2024-11-20T04:29:52,704 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/b57c1ccb08944dd3bc623f04b1c93230 2024-11-20T04:29:52,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/b57c1ccb08944dd3bc623f04b1c93230 as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/b57c1ccb08944dd3bc623f04b1c93230 2024-11-20T04:29:52,713 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5257c09a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39709, datanodeUuid=daca8ad2-2090-44e3-8eba-acb1eefa44a3, infoPort=37513, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975):Failed to transfer 
BP-1675406404-172.17.0.2-1732076970975:blk_1073741846_1029 to 127.0.0.1:46105 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:52,713 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5c9024a0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39709, datanodeUuid=daca8ad2-2090-44e3-8eba-acb1eefa44a3, infoPort=37513, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975):Failed to transfer BP-1675406404-172.17.0.2-1732076970975:blk_1073741851_1034 to 127.0.0.1:36545 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:29:52,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/b57c1ccb08944dd3bc623f04b1c93230, entries=1, sequenceid=34, filesize=5.9 K 2024-11-20T04:29:52,719 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for d9eda11a98fdcfdd972f5f79618c0ab6 in 433ms, sequenceid=34, compaction requested=true 2024-11-20T04:29:52,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d9eda11a98fdcfdd972f5f79618c0ab6: 2024-11-20T04:29:52,719 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-20T04:29:52,719 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:52,719 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/1fdf186d3a9b46438c1ea18e8ae24f30 because midkey is the same as first or last row 2024-11-20T04:29:52,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9eda11a98fdcfdd972f5f79618c0ab6:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T04:29:52,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:29:52,720 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T04:29:52,721 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T04:29:52,721 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.HStore(1541): d9eda11a98fdcfdd972f5f79618c0ab6/info is initiating minor compaction (all files) 2024-11-20T04:29:52,721 INFO [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d9eda11a98fdcfdd972f5f79618c0ab6/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. 
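Right after the flush, the split-policy records above come down to: the store is big enough to split (sumSize=28.2 K against sizeToCheck=16.0 K), but the split is refused because the candidate midkey equals the first or last row of the file. A small sketch of that decision, with assumed names and made-up row keys:

// Hypothetical sketch of the split decision reported above; not the actual
// ConstantSizeRegionSplitPolicy / StoreUtils code.
public class ToySplitCheck {
    static boolean shouldSplit(long sumSizeBytes, long sizeToCheckBytes,
                               String midKey, String firstRow, String lastRow) {
        if (sumSizeBytes <= sizeToCheckBytes) {
            return false;   // not big enough yet
        }
        if (midKey.equals(firstRow) || midKey.equals(lastRow)) {
            // Mirrors "cannot split ... because midkey is the same as first or last row".
            System.out.println("cannot split because midkey is the same as first or last row");
            return false;
        }
        return true;
    }

    public static void main(String[] args) {
        // 28.2 K of store files vs. a 16.0 K check size, as in the log,
        // with a degenerate midkey (row keys are made up for the example).
        System.out.println("split=" +
                shouldSplit(28_880, 16_384, "row0007", "row0002", "row0007"));
    }
}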
2024-11-20T04:29:52,721 INFO [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/c13f3d2d58684aa69624fee242901e97, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/1fdf186d3a9b46438c1ea18e8ae24f30, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/b57c1ccb08944dd3bc623f04b1c93230] into tmpdir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp, totalSize=28.2 K 2024-11-20T04:29:52,722 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] compactions.Compactor(225): Compacting c13f3d2d58684aa69624fee242901e97, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732076986209 2024-11-20T04:29:52,722 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1fdf186d3a9b46438c1ea18e8ae24f30, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732076990237 2024-11-20T04:29:52,723 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] compactions.Compactor(225): Compacting b57c1ccb08944dd3bc623f04b1c93230, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732076992285 2024-11-20T04:29:52,737 INFO [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9eda11a98fdcfdd972f5f79618c0ab6#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T04:29:52,738 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/a2e6c60ae72c41ca8fd422576d345867 is 1080, key is row0002/info:/1732076986209/Put/seqid=0 2024-11-20T04:29:52,739 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
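The three store files selected above add up exactly to the figure the policy reports: 10347 + 12506 + 6027 = 28880 bytes, i.e. the 28.2 K total. A minimal sketch of a take-everything minor-compaction selection over those files follows; the record type and the size cap are assumptions, not the real ExploringCompactionPolicy.

import java.util.List;

// Assumed sketch of a size-based minor-compaction selection: take the eligible
// store files, and if the whole set is small enough, compact them all,
// reporting the total size as in the log.
public class ToyCompactionSelection {
    record StoreFile(String name, long sizeBytes, long seqId) {}

    static long selectAll(List<StoreFile> eligible, long maxTotalBytes) {
        long total = eligible.stream().mapToLong(StoreFile::sizeBytes).sum();
        if (total <= maxTotalBytes) {
            System.out.printf("selected %d files of size %d%n", eligible.size(), total);
            return total;
        }
        System.out.println("selection skipped, total size too large: " + total);
        return 0;
    }

    public static void main(String[] args) {
        List<StoreFile> files = List.of(
                new StoreFile("c13f3d2d58684aa69624fee242901e97", 10347, 11),
                new StoreFile("1fdf186d3a9b46438c1ea18e8ae24f30", 12506, 24),
                new StoreFile("b57c1ccb08944dd3bc623f04b1c93230", 6027, 34));
        selectAll(files, 16L * 1024 * 1024);   // e.g. a 16 MB cap, purely illustrative
    }
}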
2024-11-20T04:29:52,740 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]) is bad. 2024-11-20T04:29:52,740 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741862_1045 2024-11-20T04:29:52,740 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK] 2024-11-20T04:29:52,742 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46105 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:52,742 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32930 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6]'}, localName='127.0.0.1:39709', datanodeUuid='daca8ad2-2090-44e3-8eba-acb1eefa44a3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741863_1046 to mirror 127.0.0.1:46105 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:29:52,743 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK], DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 2024-11-20T04:29:52,743 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32930 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T04:29:52,743 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741863_1046 2024-11-20T04:29:52,743 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32930 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:39709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32930 dst: /127.0.0.1:39709 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:52,743 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK] 2024-11-20T04:29:52,745 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
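Each WARN/exclude cycle above follows the same shape: a connect to one datanode in the write pipeline fails, the block is abandoned, that datanode is excluded, and a new pipeline is requested without it. A generic sketch of that retry-with-exclusions pattern; the types and method names are illustrative and are not the hadoop-hdfs-client API:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Predicate;

public class PipelineRetrySketch {
    /** Build a write pipeline, dropping nodes that failed on earlier attempts. */
    static List<String> allocateWithExclusions(List<String> liveNodes, Predicate<String> canConnect,
                                               int replication, int maxAttempts) {
        Set<String> excluded = new HashSet<>();
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            List<String> pipeline = new ArrayList<>();
            for (String node : liveNodes) {
                if (!excluded.contains(node) && pipeline.size() < replication) {
                    pipeline.add(node);
                }
            }
            // The first unreachable node plays the role of "datanode N ... is bad" above.
            String bad = pipeline.stream().filter(n -> !canConnect.test(n)).findFirst().orElse(null);
            if (bad == null) {
                return pipeline;          // every node answered; keep this pipeline
            }
            excluded.add(bad);            // "Excluding datanode ..." then retry with a fresh block
        }
        return List.of();                 // out of attempts; the caller has to give up
    }
}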
2024-11-20T04:29:52,745 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK], DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]) is bad. 2024-11-20T04:29:52,745 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741864_1047 2024-11-20T04:29:52,746 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK] 2024-11-20T04:29:52,748 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32946 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6]'}, localName='127.0.0.1:39709', datanodeUuid='daca8ad2-2090-44e3-8eba-acb1eefa44a3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741865_1048 to mirror 127.0.0.1:39517 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:52,748 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39517 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:29:52,748 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32946 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T04:29:52,748 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK], DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]) is bad. 2024-11-20T04:29:52,748 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741865_1048 2024-11-20T04:29:52,748 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32946 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:39709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32946 dst: /127.0.0.1:39709 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
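The "has not released the reserved bytes. Releasing 134217728 bytes as part of close" lines reflect the datanode handing back the 128 MiB it reserved up front for a block replica that was abandoned before any data arrived. A toy sketch of that bookkeeping (names are illustrative, not the datanode's internal API):

public class ReservedBytesSketch {
    static long bytesToRelease(long reservedForBlock, long bytesActuallyWritten) {
        return Math.max(0, reservedForBlock - bytesActuallyWritten);
    }

    public static void main(String[] args) {
        long blockSize = 128L * 1024 * 1024;                 // 134217728, matching the log
        System.out.println(bytesToRelease(blockSize, 0));    // block abandoned before any bytes landed
    }
}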
2024-11-20T04:29:52,749 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK] 2024-11-20T04:29:52,749 WARN [IPC Server handler 3 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T04:29:52,749 WARN [IPC Server handler 3 on default port 40051 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T04:29:52,749 WARN [IPC Server handler 3 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T04:29:52,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741866_1049 (size=17994) 2024-11-20T04:29:53,161 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/a2e6c60ae72c41ca8fd422576d345867 as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/a2e6c60ae72c41ca8fd422576d345867 2024-11-20T04:29:53,167 INFO [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d9eda11a98fdcfdd972f5f79618c0ab6/info of d9eda11a98fdcfdd972f5f79618c0ab6 into a2e6c60ae72c41ca8fd422576d345867(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
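The commit step above publishes the compacted file by moving it from the region's .tmp directory into the store directory, so readers only ever see complete files. A minimal local-filesystem sketch of that write-to-tmp-then-rename pattern (paths and names are illustrative, not the HRegionFileSystem API, and ATOMIC_MOVE is assumed to be supported by the underlying filesystem):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpCommitSketch {
    /** Publish a fully written temp file into the store directory in one rename. */
    static Path commit(Path tmpFile, Path storeDir) throws IOException {
        Files.createDirectories(storeDir);
        Path target = storeDir.resolve(tmpFile.getFileName());
        return Files.move(tmpFile, target, StandardCopyOption.ATOMIC_MOVE);
    }
}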
2024-11-20T04:29:53,168 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d9eda11a98fdcfdd972f5f79618c0ab6: 2024-11-20T04:29:53,168 INFO [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6., storeName=d9eda11a98fdcfdd972f5f79618c0ab6/info, priority=13, startTime=1732076992719; duration=0sec 2024-11-20T04:29:53,168 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-20T04:29:53,168 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:53,168 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/a2e6c60ae72c41ca8fd422576d345867 because midkey is the same as first or last row 2024-11-20T04:29:53,168 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-20T04:29:53,168 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:53,168 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/a2e6c60ae72c41ca8fd422576d345867 because midkey is the same as first or last row 2024-11-20T04:29:53,169 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-20T04:29:53,169 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:53,169 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/a2e6c60ae72c41ca8fd422576d345867 because midkey is the same as first or last row 2024-11-20T04:29:53,169 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:29:53,169 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9eda11a98fdcfdd972f5f79618c0ab6:info 2024-11-20T04:29:53,361 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:53,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40241 {}] regionserver.HRegion(8855): Flush requested on d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:53,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d9eda11a98fdcfdd972f5f79618c0ab6 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-20T04:29:53,710 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/0bb571b537ef4b78ac4943aac39a801b is 1079, key is tmprow/info:/1732076993704/Put/seqid=0 2024-11-20T04:29:53,712 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:53,712 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK], DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]) is bad. 2024-11-20T04:29:53,712 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741867_1050 2024-11-20T04:29:53,713 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5c9024a0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39709, datanodeUuid=daca8ad2-2090-44e3-8eba-acb1eefa44a3, infoPort=37513, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975):Failed to transfer BP-1675406404-172.17.0.2-1732076970975:blk_1073741841_1024 to 127.0.0.1:45707 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:53,713 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK] 2024-11-20T04:29:53,714 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:53,714 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK], DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]) is bad. 2024-11-20T04:29:53,714 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741868_1051 2024-11-20T04:29:53,715 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK] 2024-11-20T04:29:53,716 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:53,716 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 2024-11-20T04:29:53,716 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741869_1052 2024-11-20T04:29:53,717 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK] 2024-11-20T04:29:53,718 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:53,718 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]) is bad. 
2024-11-20T04:29:53,718 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741870_1053 2024-11-20T04:29:53,718 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK] 2024-11-20T04:29:53,719 WARN [IPC Server handler 1 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T04:29:53,719 WARN [IPC Server handler 1 on default port 40051 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T04:29:53,719 WARN [IPC Server handler 1 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T04:29:53,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741871_1054 (size=6027) 2024-11-20T04:29:54,123 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/0bb571b537ef4b78ac4943aac39a801b 2024-11-20T04:29:54,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/0bb571b537ef4b78ac4943aac39a801b as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/0bb571b537ef4b78ac4943aac39a801b 2024-11-20T04:29:54,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/0bb571b537ef4b78ac4943aac39a801b, entries=1, sequenceid=45, filesize=5.9 K 2024-11-20T04:29:54,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for d9eda11a98fdcfdd972f5f79618c0ab6 in 432ms, sequenceid=45, compaction requested=false 2024-11-20T04:29:54,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
d9eda11a98fdcfdd972f5f79618c0ab6: 2024-11-20T04:29:54,137 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-20T04:29:54,137 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:54,137 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/a2e6c60ae72c41ca8fd422576d345867 because midkey is the same as first or last row 2024-11-20T04:29:54,225 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:54,225 WARN [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]] 2024-11-20T04:29:54,225 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C40241%2C1732076972119:(num 1732076992209) roll requested 2024-11-20T04:29:54,225 INFO [regionserver/c2a32e16c274:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C40241%2C1732076972119.1732076994225 2024-11-20T04:29:54,228 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
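The roll request above is driven by a simple threshold: the WAL's current pipeline has 1 replica but no fewer than 2 are expected, so the roller closes the current WAL file and opens a new one. A sketch of that check, with illustrative parameter names rather than the FSHLog fields:

public class WalRollCheckSketch {
    /** @return true when the WAL should be rolled because its pipeline degraded. */
    static boolean shouldRequestRoll(int currentPipelineReplicas, int minTolerableReplicas) {
        return currentPipelineReplicas < minTolerableReplicas;
    }

    public static void main(String[] args) {
        // Values from the log: pipeline shrank to 1 datanode, minimum tolerated is 2.
        System.out.println(shouldRequestRoll(1, 2));   // true -> "Requesting close of WAL"
    }
}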
2024-11-20T04:29:54,228 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK], DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 2024-11-20T04:29:54,228 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741872_1055 2024-11-20T04:29:54,229 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK] 2024-11-20T04:29:54,231 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39517 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:54,231 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32966 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6]'}, localName='127.0.0.1:39709', datanodeUuid='daca8ad2-2090-44e3-8eba-acb1eefa44a3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741873_1056 to mirror 127.0.0.1:39517 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:29:54,231 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK], DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]) is bad. 2024-11-20T04:29:54,231 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741873_1056 2024-11-20T04:29:54,231 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32966 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T04:29:54,231 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32966 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:39709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32966 dst: /127.0.0.1:39709 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:54,232 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK] 2024-11-20T04:29:54,233 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36545 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:29:54,233 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32978 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6]'}, localName='127.0.0.1:39709', datanodeUuid='daca8ad2-2090-44e3-8eba-acb1eefa44a3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741874_1057 to mirror 127.0.0.1:36545 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:54,233 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK], DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]) is bad. 2024-11-20T04:29:54,234 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741874_1057 2024-11-20T04:29:54,234 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32978 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T04:29:54,234 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:32978 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:39709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32978 dst: /127.0.0.1:39709 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:54,234 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK] 2024-11-20T04:29:54,235 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:54,235 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]) is bad. 
2024-11-20T04:29:54,235 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741875_1058 2024-11-20T04:29:54,236 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK] 2024-11-20T04:29:54,236 WARN [IPC Server handler 0 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T04:29:54,236 WARN [IPC Server handler 0 on default port 40051 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T04:29:54,236 WARN [IPC Server handler 0 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T04:29:54,239 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:54,239 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:54,239 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:54,239 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:54,239 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:29:54,239 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076992209 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076994225 2024-11-20T04:29:54,240 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37513:37513)] 2024-11-20T04:29:54,240 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 is not closed yet, will try archiving it next time 2024-11-20T04:29:54,240 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076992209 is not closed yet, will try archiving it next time 2024-11-20T04:29:54,240 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076988198 to hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/oldWALs/c2a32e16c274%2C40241%2C1732076972119.1732076988198 2024-11-20T04:29:54,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741856_1039 (size=13591) 2024-11-20T04:29:54,252 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:54,642 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 is not closed yet, will try archiving it next time 2024-11-20T04:29:55,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40241 {}] regionserver.HRegion(8855): Flush requested on d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:29:55,124 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d9eda11a98fdcfdd972f5f79618c0ab6 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-20T04:29:55,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/9030b6418e13466f9e84eb6e7e071af4 is 1079, key is tmprow/info:/1732076995123/Put/seqid=0 2024-11-20T04:29:55,131 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
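The WAL paths above encode the server name (host, port, start code, with commas escaped as %2C) followed by the timestamp of the roll. A small sketch that decodes one of the file names appearing in this log:

import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;

public class WalNameSketch {
    public static void main(String[] args) {
        String walName = "c2a32e16c274%2C40241%2C1732076972119.1732076994225";
        String decoded = URLDecoder.decode(walName, StandardCharsets.UTF_8);
        String[] serverAndTime = decoded.split("\\.");
        String[] serverParts = serverAndTime[0].split(",");
        System.out.println("host=" + serverParts[0] + " port=" + serverParts[1]
            + " startcode=" + serverParts[2] + " rolledAt=" + serverAndTime[1]);
    }
}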
2024-11-20T04:29:55,131 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK], DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 2024-11-20T04:29:55,131 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741877_1060 2024-11-20T04:29:55,131 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK] 2024-11-20T04:29:55,133 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:55,133 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK], DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]) is bad. 2024-11-20T04:29:55,133 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741878_1061 2024-11-20T04:29:55,133 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK] 2024-11-20T04:29:55,135 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36545 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:29:55,135 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:33002 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6]'}, localName='127.0.0.1:39709', datanodeUuid='daca8ad2-2090-44e3-8eba-acb1eefa44a3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741879_1062 to mirror 127.0.0.1:36545 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:55,136 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK], DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]) is bad. 2024-11-20T04:29:55,136 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741879_1062 2024-11-20T04:29:55,136 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:33002 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T04:29:55,136 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:33002 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:39709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33002 dst: /127.0.0.1:39709 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:55,136 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK] 2024-11-20T04:29:55,138 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39517 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:55,138 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:33008 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6]'}, localName='127.0.0.1:39709', datanodeUuid='daca8ad2-2090-44e3-8eba-acb1eefa44a3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741880_1063 to mirror 127.0.0.1:39517 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:29:55,138 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK], DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]) is bad. 2024-11-20T04:29:55,138 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:33008 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T04:29:55,138 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741880_1063 2024-11-20T04:29:55,138 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:33008 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:39709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33008 dst: /127.0.0.1:39709 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:29:55,139 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK] 2024-11-20T04:29:55,139 WARN [IPC Server handler 0 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T04:29:55,139 WARN [IPC Server handler 0 on default port 40051 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T04:29:55,139 WARN [IPC Server handler 0 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T04:29:55,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741881_1064 (size=6027) 2024-11-20T04:29:55,361 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:29:55,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/9030b6418e13466f9e84eb6e7e071af4 2024-11-20T04:29:55,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/9030b6418e13466f9e84eb6e7e071af4 as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/9030b6418e13466f9e84eb6e7e071af4 2024-11-20T04:29:55,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/9030b6418e13466f9e84eb6e7e071af4, entries=1, sequenceid=55, filesize=5.9 K 2024-11-20T04:29:55,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for d9eda11a98fdcfdd972f5f79618c0ab6 in 432ms, sequenceid=55, compaction requested=true 2024-11-20T04:29:55,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d9eda11a98fdcfdd972f5f79618c0ab6: 2024-11-20T04:29:55,556 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-20T04:29:55,556 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:55,556 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/a2e6c60ae72c41ca8fd422576d345867 because midkey is the same as first or last row 2024-11-20T04:29:55,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d9eda11a98fdcfdd972f5f79618c0ab6:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T04:29:55,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:29:55,557 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T04:29:55,558 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T04:29:55,558 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.HStore(1541): d9eda11a98fdcfdd972f5f79618c0ab6/info is initiating minor compaction (all files) 2024-11-20T04:29:55,558 INFO [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
d9eda11a98fdcfdd972f5f79618c0ab6/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. 2024-11-20T04:29:55,558 INFO [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/a2e6c60ae72c41ca8fd422576d345867, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/0bb571b537ef4b78ac4943aac39a801b, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/9030b6418e13466f9e84eb6e7e071af4] into tmpdir=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp, totalSize=29.3 K 2024-11-20T04:29:55,559 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] compactions.Compactor(225): Compacting a2e6c60ae72c41ca8fd422576d345867, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732076986209 2024-11-20T04:29:55,559 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0bb571b537ef4b78ac4943aac39a801b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732076993704 2024-11-20T04:29:55,559 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9030b6418e13466f9e84eb6e7e071af4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732076995123 2024-11-20T04:29:55,573 INFO [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d9eda11a98fdcfdd972f5f79618c0ab6#info#compaction#24 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T04:29:55,574 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/31259a7874ee48faa6f77d42ad04db35 is 1080, key is row0002/info:/1732076986209/Put/seqid=0 2024-11-20T04:29:55,576 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:55,576 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK], DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]) is bad. 2024-11-20T04:29:55,576 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741882_1065 2024-11-20T04:29:55,576 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK] 2024-11-20T04:29:55,578 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:55,578 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK], DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 2024-11-20T04:29:55,578 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741883_1066 2024-11-20T04:29:55,578 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK] 2024-11-20T04:29:55,581 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45707 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:29:55,581 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:33016 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6]'}, localName='127.0.0.1:39709', datanodeUuid='daca8ad2-2090-44e3-8eba-acb1eefa44a3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741884_1067 to mirror 127.0.0.1:45707 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:55,581 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK], DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]) is bad. 2024-11-20T04:29:55,581 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741884_1067 2024-11-20T04:29:55,581 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:33016 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T04:29:55,581 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:33016 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:39709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33016 dst: /127.0.0.1:39709 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:55,582 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45707,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK] 2024-11-20T04:29:55,583 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:55,583 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]) is bad. 
2024-11-20T04:29:55,583 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741885_1068 2024-11-20T04:29:55,583 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK] 2024-11-20T04:29:55,584 WARN [IPC Server handler 1 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T04:29:55,584 WARN [IPC Server handler 1 on default port 40051 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T04:29:55,584 WARN [IPC Server handler 1 on default port 40051 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T04:29:55,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741886_1069 (size=18097) 2024-11-20T04:29:55,713 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5257c09a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39709, datanodeUuid=daca8ad2-2090-44e3-8eba-acb1eefa44a3, infoPort=37513, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975):Failed to transfer BP-1675406404-172.17.0.2-1732076970975:blk_1073741866_1049 to 127.0.0.1:39517 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:29:55,713 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5c9024a0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39709, datanodeUuid=daca8ad2-2090-44e3-8eba-acb1eefa44a3, infoPort=37513, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975):Failed to transfer BP-1675406404-172.17.0.2-1732076970975:blk_1073741861_1044 to 127.0.0.1:45707 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:55,994 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/31259a7874ee48faa6f77d42ad04db35 as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/31259a7874ee48faa6f77d42ad04db35 2024-11-20T04:29:56,001 INFO [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d9eda11a98fdcfdd972f5f79618c0ab6/info of d9eda11a98fdcfdd972f5f79618c0ab6 into 31259a7874ee48faa6f77d42ad04db35(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T04:29:56,001 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d9eda11a98fdcfdd972f5f79618c0ab6: 2024-11-20T04:29:56,001 INFO [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6., storeName=d9eda11a98fdcfdd972f5f79618c0ab6/info, priority=13, startTime=1732076995557; duration=0sec 2024-11-20T04:29:56,001 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-20T04:29:56,001 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:56,001 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/31259a7874ee48faa6f77d42ad04db35 because midkey is the same as first or last row 2024-11-20T04:29:56,002 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-20T04:29:56,002 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:56,002 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/31259a7874ee48faa6f77d42ad04db35 because midkey is the same as first or last row 2024-11-20T04:29:56,002 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-20T04:29:56,002 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:29:56,002 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/31259a7874ee48faa6f77d42ad04db35 because midkey is the same as first or last row 2024-11-20T04:29:56,002 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:29:56,002 DEBUG [RS:0;c2a32e16c274:40241-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d9eda11a98fdcfdd972f5f79618c0ab6:info 2024-11-20T04:29:56,240 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:56,241 WARN [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-20T04:29:56,253 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:56,347 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:29:56,350 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:29:56,350 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:29:56,351 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:29:56,351 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T04:29:56,351 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d6ec757{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:29:56,351 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17162101{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:29:56,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2edfde78{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/java.io.tmpdir/jetty-localhost-36311-hadoop-hdfs-3_4_1-tests_jar-_-any-8840286733882605944/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:29:56,466 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@9536d35{HTTP/1.1, (http/1.1)}{localhost:36311} 2024-11-20T04:29:56,466 INFO [Time-limited test {}] server.Server(415): Started @130544ms 2024-11-20T04:29:56,467 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T04:29:56,573 WARN [Thread-990 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T04:29:56,581 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xff145c80b3e782ba with lease ID 0x271a803eb366b821: from storage DS-fad33451-7600-49b5-b381-276f45eb9326 node DatanodeRegistration(127.0.0.1:38957, datanodeUuid=83959f4a-849d-48e1-94ea-6cb816411dc3, infoPort=37673, infoSecurePort=0, ipcPort=37133, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:29:56,581 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xff145c80b3e782ba with lease ID 0x271a803eb366b821: from storage DS-267587b6-c838-477b-8f08-6b95c074cf82 node DatanodeRegistration(127.0.0.1:38957, datanodeUuid=83959f4a-849d-48e1-94ea-6cb816411dc3, infoPort=37673, infoSecurePort=0, ipcPort=37133, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:29:56,713 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5257c09a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39709, datanodeUuid=daca8ad2-2090-44e3-8eba-acb1eefa44a3, infoPort=37513, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975):Failed to transfer BP-1675406404-172.17.0.2-1732076970975:blk_1073741856_1039 to 127.0.0.1:36545 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:56,713 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5c9024a0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39709, datanodeUuid=daca8ad2-2090-44e3-8eba-acb1eefa44a3, infoPort=37513, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975):Failed to transfer BP-1675406404-172.17.0.2-1732076970975:blk_1073741871_1054 to 127.0.0.1:46105 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:57,361 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:58,241 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:29:58,253 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:29:58,714 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5c9024a0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39709, datanodeUuid=daca8ad2-2090-44e3-8eba-acb1eefa44a3, infoPort=37513, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975):Failed to transfer BP-1675406404-172.17.0.2-1732076970975:blk_1073741881_1064 to 127.0.0.1:39517 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:58,714 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5257c09a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39709, datanodeUuid=daca8ad2-2090-44e3-8eba-acb1eefa44a3, infoPort=37513, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975):Failed to transfer BP-1675406404-172.17.0.2-1732076970975:blk_1073741886_1069 to 127.0.0.1:46105 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:29:59,362 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:30:00,241 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:00,253 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:01,362 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:01,989 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T04:30:02,242 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:30:02,253 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:02,433 ERROR [FSHLog-0-hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData-prefix:c2a32e16c274,42739,1732076972014 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:02,433 WARN [FSHLog-0-hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData-prefix:c2a32e16c274,42739,1732076972014 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:02,433 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C42739%2C1732076972014:(num 1732076972283) roll requested 2024-11-20T04:30:02,434 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C42739%2C1732076972014.1732077002433 2024-11-20T04:30:02,437 WARN [Thread-1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:02,437 WARN [Thread-1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK], DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 2024-11-20T04:30:02,437 WARN [Thread-1010 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741887_1070 2024-11-20T04:30:02,438 WARN [Thread-1010 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK] 2024-11-20T04:30:02,440 WARN [Thread-1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:02,440 WARN [Thread-1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK], DatanodeInfoWithStorage[127.0.0.1:38957,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]) is bad. 
2024-11-20T04:30:02,440 WARN [Thread-1010 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741888_1071 2024-11-20T04:30:02,440 WARN [Thread-1010 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK] 2024-11-20T04:30:02,445 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:02,445 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:02,445 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:02,445 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:02,445 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:02,446 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/WALs/c2a32e16c274,42739,1732076972014/c2a32e16c274%2C42739%2C1732076972014.1732076972283 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/WALs/c2a32e16c274,42739,1732076972014/c2a32e16c274%2C42739%2C1732076972014.1732077002433 2024-11-20T04:30:02,446 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:02,446 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:30:02,446 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/WALs/c2a32e16c274,42739,1732076972014/c2a32e16c274%2C42739%2C1732076972014.1732076972283 2024-11-20T04:30:02,447 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37513:37513),(127.0.0.1/127.0.0.1:37673:37673)] 2024-11-20T04:30:02,447 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/WALs/c2a32e16c274,42739,1732076972014/c2a32e16c274%2C42739%2C1732076972014.1732076972283 is not closed yet, will try archiving it next time 2024-11-20T04:30:02,447 WARN [IPC Server handler 1 on default port 40051 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/WALs/c2a32e16c274,42739,1732076972014/c2a32e16c274%2C42739%2C1732076972014.1732076972283 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741830_1006 2024-11-20T04:30:02,447 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/WALs/c2a32e16c274,42739,1732076972014/c2a32e16c274%2C42739%2C1732076972014.1732076972283 after 1ms 2024-11-20T04:30:03,362 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:04,242 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:05,363 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:06,242 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:06,449 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/WALs/c2a32e16c274,42739,1732076972014/c2a32e16c274%2C42739%2C1732076972014.1732076972283 after 4003ms 2024-11-20T04:30:06,597 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@23d14d16 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1675406404-172.17.0.2-1732076970975:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:36545,null,null]) java.net.ConnectException: Call From c2a32e16c274/172.17.0.2 to localhost:38571 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-20T04:30:06,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38957 is added to blk_1073741833_1020 (size=455) 2024-11-20T04:30:07,222 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076972690 to hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/oldWALs/c2a32e16c274%2C40241%2C1732076972119.1732076972690 2024-11-20T04:30:07,223 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076992209 to hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/oldWALs/c2a32e16c274%2C40241%2C1732076972119.1732076992209 2024-11-20T04:30:07,363 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:07,577 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1238488f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38957, datanodeUuid=83959f4a-849d-48e1-94ea-6cb816411dc3, infoPort=37673, infoSecurePort=0, ipcPort=37133, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975):Failed to transfer BP-1675406404-172.17.0.2-1732076970975:blk_1073741833_1020 to 127.0.0.1:36545 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:30:08,243 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:09,363 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:30:10,101 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C40241%2C1732076972119.1732077010101
2024-11-20T04:30:10,107 WARN [Thread-1020 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T04:30:10,107 WARN [Thread-1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK], DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]) is bad.
2024-11-20T04:30:10,107 WARN [Thread-1020 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741890_1074
2024-11-20T04:30:10,108 WARN [Thread-1020 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]
2024-11-20T04:30:10,114 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:30:10,115 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:30:10,115 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:30:10,115 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:30:10,115 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:30:10,115 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076994225 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732077010101
2024-11-20T04:30:10,116 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37673:37673),(127.0.0.1/127.0.0.1:37513:37513)]
2024-11-20T04:30:10,116 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076994225 is not closed yet, will try archiving it next time
2024-11-20T04:30:10,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741876_1059 (size=12911)
2024-11-20T04:30:10,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40241 {}] regionserver.HRegion(8855): Flush requested on d9eda11a98fdcfdd972f5f79618c0ab6
2024-11-20T04:30:10,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d9eda11a98fdcfdd972f5f79618c0ab6 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-20T04:30:10,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/67c2d3545a5b4f34ad2b259c22b568f5 is 1080, key is row0013/info:/1732077010118/Put/seqid=0
2024-11-20T04:30:10,130 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T04:30:10,130 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK], DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]) is bad.
2024-11-20T04:30:10,130 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741892_1076
2024-11-20T04:30:10,131 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]
2024-11-20T04:30:10,133 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T04:30:10,133 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK], DatanodeInfoWithStorage[127.0.0.1:38957,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 2024-11-20T04:30:10,133 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741893_1077 2024-11-20T04:30:10,133 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK] 2024-11-20T04:30:10,135 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:10,135 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]) is bad. 
2024-11-20T04:30:10,135 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741894_1078 2024-11-20T04:30:10,136 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK] 2024-11-20T04:30:10,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38957 is added to blk_1073741895_1079 (size=8190) 2024-11-20T04:30:10,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741895_1079 (size=8190) 2024-11-20T04:30:10,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/67c2d3545a5b4f34ad2b259c22b568f5 2024-11-20T04:30:10,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/67c2d3545a5b4f34ad2b259c22b568f5 as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/67c2d3545a5b4f34ad2b259c22b568f5 2024-11-20T04:30:10,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/67c2d3545a5b4f34ad2b259c22b568f5, entries=3, sequenceid=66, filesize=8.0 K 2024-11-20T04:30:10,161 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for d9eda11a98fdcfdd972f5f79618c0ab6 in 39ms, sequenceid=66, compaction requested=false 2024-11-20T04:30:10,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d9eda11a98fdcfdd972f5f79618c0ab6: 2024-11-20T04:30:10,161 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-20T04:30:10,161 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:30:10,161 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/31259a7874ee48faa6f77d42ad04db35 because midkey is the same as first or last row 2024-11-20T04:30:10,243 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:10,243 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-20T04:30:10,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T04:30:10,347 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T04:30:10,347 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:30:10,347 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:30:10,348 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:30:10,348 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T04:30:10,348 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T04:30:10,348 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1058747879, stopped=false 2024-11-20T04:30:10,348 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c2a32e16c274,42739,1732076972014 2024-11-20T04:30:10,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10133a312670002, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:30:10,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:30:10,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10133a312670002, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:10,351 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T04:30:10,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:10,351 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T04:30:10,351 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:30:10,351 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:30:10,351 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'c2a32e16c274,40241,1732076972119' ***** 2024-11-20T04:30:10,352 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T04:30:10,352 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c2a32e16c274,41341,1732076973295' ***** 2024-11-20T04:30:10,352 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T04:30:10,352 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T04:30:10,352 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T04:30:10,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:30:10,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:10,352 INFO [RS:0;c2a32e16c274:40241 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T04:30:10,353 INFO [RS:0;c2a32e16c274:40241 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T04:30:10,353 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T04:30:10,353 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(3091): Received CLOSE for d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:30:10,353 INFO [RS:1;c2a32e16c274:41341 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T04:30:10,353 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T04:30:10,353 INFO [RS:1;c2a32e16c274:41341 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T04:30:10,353 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.HRegionServer(959): stopping server c2a32e16c274,41341,1732076973295 2024-11-20T04:30:10,353 INFO [RS:1;c2a32e16c274:41341 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:30:10,353 INFO [RS:1;c2a32e16c274:41341 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;c2a32e16c274:41341. 
2024-11-20T04:30:10,353 DEBUG [RS:1;c2a32e16c274:41341 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:30:10,353 DEBUG [RS:1;c2a32e16c274:41341 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:30:10,353 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.HRegionServer(976): stopping server c2a32e16c274,41341,1732076973295; all regions closed. 2024-11-20T04:30:10,353 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(959): stopping server c2a32e16c274,40241,1732076972119 2024-11-20T04:30:10,353 INFO [RS:0;c2a32e16c274:40241 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:30:10,353 INFO [RS:0;c2a32e16c274:40241 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c2a32e16c274:40241. 
2024-11-20T04:30:10,353 DEBUG [RS:0;c2a32e16c274:40241 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:30:10,353 DEBUG [RS:0;c2a32e16c274:40241 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:30:10,354 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:30:10,354 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T04:30:10,354 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T04:30:10,354 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-20T04:30:10,354 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41341-0x10133a312670002, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:30:10,354 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T04:30:10,354 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d9eda11a98fdcfdd972f5f79618c0ab6, disabling compactions & flushes 2024-11-20T04:30:10,354 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-20T04:30:10,354 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:10,354 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:30:10,354 DEBUG [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(1325): Online Regions={d9eda11a98fdcfdd972f5f79618c0ab6=TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6., 1588230740=hbase:meta,,1.1588230740} 2024-11-20T04:30:10,355 DEBUG [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d9eda11a98fdcfdd972f5f79618c0ab6 2024-11-20T04:30:10,355 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:10,355 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:10,355 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T04:30:10,355 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:10,355 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T04:30:10,355 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T04:30:10,355 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T04:30:10,355 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T04:30:10,355 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:10,355 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-20T04:30:10,356 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:10,356 ERROR [FSHLog-0-hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094-prefix:c2a32e16c274,40241,1732076972119.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:10,356 WARN [FSHLog-0-hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094-prefix:c2a32e16c274,40241,1732076972119.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:10,356 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:30:10,356 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
2024-11-20T04:30:10,356 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C40241%2C1732076972119.meta:.meta(num 1732076973144) roll requested
2024-11-20T04:30:10,354 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.
2024-11-20T04:30:10,356 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.
2024-11-20T04:30:10,356 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. after waiting 0 ms
2024-11-20T04:30:10,356 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.
2024-11-20T04:30:10,356 INFO [regionserver/c2a32e16c274:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C40241%2C1732076972119.meta.1732077010356.meta
2024-11-20T04:30:10,356 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing d9eda11a98fdcfdd972f5f79618c0ab6 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB
2024-11-20T04:30:10,357 WARN [IPC Server handler 4 on default port 40051 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 has not been closed. Lease recovery is in progress. RecoveryId = 1080 for block blk_1073741837_1013
2024-11-20T04:30:10,357 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 after 1ms
2024-11-20T04:30:10,361 INFO [regionserver/c2a32e16c274:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-11-20T04:30:10,361 INFO [regionserver/c2a32e16c274:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-11-20T04:30:10,365 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/bd0b82ef05bc4337b5052d78a8a10fd2 is 1080, key is row0015/info:/1732077010124/Put/seqid=0
2024-11-20T04:30:10,374 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:30:10,374 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:30:10,374 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:30:10,374 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:30:10,374 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:30:10,374 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732077010356.meta
2024-11-20T04:30:10,377 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-20T04:30:10,377 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:10,377 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta 2024-11-20T04:30:10,377 WARN [IPC Server handler 1 on default port 40051 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta has not been closed. Lease recovery is in progress. RecoveryId = 1083 for block blk_1073741834_1010 2024-11-20T04:30:10,378 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta after 1ms 2024-11-20T04:30:10,382 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37513:37513),(127.0.0.1/127.0.0.1:37673:37673)] 2024-11-20T04:30:10,382 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta is not closed yet, will try archiving it next time 2024-11-20T04:30:10,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741897_1082 (size=14660) 2024-11-20T04:30:10,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38957 is added to blk_1073741897_1082 (size=14660) 2024-11-20T04:30:10,385 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/bd0b82ef05bc4337b5052d78a8a10fd2 2024-11-20T04:30:10,393 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/.tmp/info/bd0b82ef05bc4337b5052d78a8a10fd2 as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/bd0b82ef05bc4337b5052d78a8a10fd2 2024-11-20T04:30:10,400 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/bd0b82ef05bc4337b5052d78a8a10fd2, entries=9, sequenceid=78, filesize=14.3 K 2024-11-20T04:30:10,401 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/.tmp/info/d4498cc4d9384962ba2657797a189d55 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6./info:regioninfo/1732076973773/Put/seqid=0 2024-11-20T04:30:10,402 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for d9eda11a98fdcfdd972f5f79618c0ab6 in 46ms, sequenceid=78, compaction requested=true 2024-11-20T04:30:10,403 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/c13f3d2d58684aa69624fee242901e97, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/1fdf186d3a9b46438c1ea18e8ae24f30, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/a2e6c60ae72c41ca8fd422576d345867, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/b57c1ccb08944dd3bc623f04b1c93230, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/0bb571b537ef4b78ac4943aac39a801b, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/9030b6418e13466f9e84eb6e7e071af4] to archive 2024-11-20T04:30:10,403 WARN [Thread-1046 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
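The util.RecoverLeaseFSUtils entries above ("Recover lease on dfs file ...", "Failed to recover lease, attempt=0 ... after 1ms", with attempt=1 appearing roughly four seconds later further down) follow the standard recover-then-poll pattern for taking over an HDFS file whose writer has died: ask the NameNode to recover the lease, then poll until the file is actually closed. A minimal sketch of that loop against DistributedFileSystem directly (this is not the HBase utility itself, just the shape of it; the 4-second sleep is taken from the spacing of the attempts in this log):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  /** Ask the NameNode to recover the lease on a dead writer's file, then poll until closed. */
  static boolean recoverLease(DistributedFileSystem dfs, Path file, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease() returns true once the file is closed on the NameNode side.
      if (dfs.recoverLease(file) || dfs.isFileClosed(file)) {
        return true;
      }
      Thread.sleep(4000L); // the log shows roughly 4 s between attempts
    }
    return false;
  }
}

Until the block recovery started by the NameNode (RecoveryId = 1080 above) completes, each call returns false and the attempt is logged as failed and retried.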
2024-11-20T04:30:10,403 WARN [Thread-1046 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741898_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 2024-11-20T04:30:10,403 WARN [Thread-1046 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741898_1084 2024-11-20T04:30:10,404 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T04:30:10,404 WARN [Thread-1046 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK] 2024-11-20T04:30:10,406 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/c13f3d2d58684aa69624fee242901e97 to hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/c13f3d2d58684aa69624fee242901e97 2024-11-20T04:30:10,408 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/1fdf186d3a9b46438c1ea18e8ae24f30 to hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/1fdf186d3a9b46438c1ea18e8ae24f30 2024-11-20T04:30:10,410 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/a2e6c60ae72c41ca8fd422576d345867 to hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/a2e6c60ae72c41ca8fd422576d345867 2024-11-20T04:30:10,411 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/b57c1ccb08944dd3bc623f04b1c93230 to hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/b57c1ccb08944dd3bc623f04b1c93230 2024-11-20T04:30:10,412 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/0bb571b537ef4b78ac4943aac39a801b to hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/0bb571b537ef4b78ac4943aac39a801b 2024-11-20T04:30:10,414 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/9030b6418e13466f9e84eb6e7e071af4 to hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/info/9030b6418e13466f9e84eb6e7e071af4 2024-11-20T04:30:10,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38957 is added to blk_1073741899_1085 (size=7089) 2024-11-20T04:30:10,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741899_1085 (size=7089) 2024-11-20T04:30:10,414 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c2a32e16c274:42739 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-20T04:30:10,415 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/.tmp/info/d4498cc4d9384962ba2657797a189d55 2024-11-20T04:30:10,415 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c13f3d2d58684aa69624fee242901e97=10347, 1fdf186d3a9b46438c1ea18e8ae24f30=12506, a2e6c60ae72c41ca8fd422576d345867=17994, b57c1ccb08944dd3bc623f04b1c93230=6027, 0bb571b537ef4b78ac4943aac39a801b=6027, 9030b6418e13466f9e84eb6e7e071af4=6027] 2024-11-20T04:30:10,421 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/default/TestLogRolling-testLogRollOnDatanodeDeath/d9eda11a98fdcfdd972f5f79618c0ab6/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-20T04:30:10,421 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. 2024-11-20T04:30:10,422 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d9eda11a98fdcfdd972f5f79618c0ab6: Waiting for close lock at 1732077010354Running coprocessor pre-close hooks at 1732077010354Disabling compacts and flushes for region at 1732077010354Disabling writes for close at 1732077010356 (+2 ms)Obtaining lock to block concurrent updates at 1732077010356Preparing flush snapshotting stores in d9eda11a98fdcfdd972f5f79618c0ab6 at 1732077010356Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1732077010357 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. at 1732077010358 (+1 ms)Flushing d9eda11a98fdcfdd972f5f79618c0ab6/info: creating writer at 1732077010358Flushing d9eda11a98fdcfdd972f5f79618c0ab6/info: appending metadata at 1732077010364 (+6 ms)Flushing d9eda11a98fdcfdd972f5f79618c0ab6/info: closing flushed file at 1732077010364Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a8b997a: reopening flushed file at 1732077010392 (+28 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for d9eda11a98fdcfdd972f5f79618c0ab6 in 46ms, sequenceid=78, compaction requested=true at 1732077010402 (+10 ms)Writing region close event to WAL at 1732077010415 (+13 ms)Running coprocessor post-close hooks at 1732077010421 (+6 ms)Closed at 1732077010421 2024-11-20T04:30:10,422 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732076973403.d9eda11a98fdcfdd972f5f79618c0ab6. 
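The backup.HFileArchiver entries above move compacted store files out of the region's data directory into a mirrored path under archive/ (data/default/TestLogRolling-testLogRollOnDatanodeDeath/<region>/info/<hfile> becomes archive/data/default/.../info/<hfile>); the follow-up "Failed to report archival of files" is only the quota report to the master failing because the RPC client is already stopped during shutdown, and the log itself notes it will be retried. A hedged sketch of the path mirroring, using a plain FileSystem rename rather than the HBase archiver (the rootDir/storeFile arguments are assumptions for illustration):

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  /** Mirror a store file's path under <rootDir>/archive and move the file there. */
  static Path archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws Exception {
    // Assumes storeFile lives under rootDir, e.g.
    //   <root>/data/default/<table>/<region>/info/<hfile>
    //   -> <root>/archive/data/default/<table>/<region>/info/<hfile>
    String root = rootDir.toUri().getPath();
    String relative = storeFile.toUri().getPath().substring(root.length() + 1);
    Path archived = new Path(new Path(rootDir, "archive"), relative);
    fs.mkdirs(archived.getParent());        // create the mirrored directory tree
    if (!fs.rename(storeFile, archived)) {  // a metadata-only move, not a copy
      throw new IllegalStateException("rename failed for " + storeFile);
    }
    return archived;
  }
}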
2024-11-20T04:30:10,437 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/.tmp/ns/ceca5a08bd51462e8af1cc6b0106bf01 is 43, key is default/ns:d/1732076973217/Put/seqid=0 2024-11-20T04:30:10,439 WARN [Thread-1053 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:10,439 WARN [Thread-1053 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK], DatanodeInfoWithStorage[127.0.0.1:39709,DS-55e2085b-2e9c-4ffc-84df-259f8c67b689,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK]) is bad. 2024-11-20T04:30:10,439 WARN [Thread-1053 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741900_1086 2024-11-20T04:30:10,440 WARN [Thread-1053 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36545,DS-e8ea5853-cc6d-4305-9215-b019e27df0a1,DISK] 2024-11-20T04:30:10,442 WARN [Thread-1053 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39517 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:30:10,442 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:60064 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741901_1087] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data4]'}, localName='127.0.0.1:38957', datanodeUuid='83959f4a-849d-48e1-94ea-6cb816411dc3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741901_1087 to mirror 127.0.0.1:39517 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:30:10,443 WARN [Thread-1053 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38957,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK], DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]) is bad. 2024-11-20T04:30:10,443 WARN [Thread-1053 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741901_1087 2024-11-20T04:30:10,443 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:60064 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741901_1087] {}] datanode.BlockReceiver(316): Block 1073741901 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T04:30:10,443 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:60064 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741901_1087] {}] datanode.DataXceiver(331): 127.0.0.1:38957:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60064 dst: /127.0.0.1:38957 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:30:10,443 WARN [Thread-1053 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK] 2024-11-20T04:30:10,445 WARN [Thread-1053 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46105 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:10,445 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:60072 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741902_1088] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data4]'}, localName='127.0.0.1:38957', datanodeUuid='83959f4a-849d-48e1-94ea-6cb816411dc3', xmitsInProgress=0}:Exception transferring block BP-1675406404-172.17.0.2-1732076970975:blk_1073741902_1088 to mirror 127.0.0.1:46105 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:30:10,445 WARN [Thread-1053 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38957,DS-fad33451-7600-49b5-b381-276f45eb9326,DISK], DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 2024-11-20T04:30:10,445 WARN [Thread-1053 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741902_1088 2024-11-20T04:30:10,445 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:60072 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741902_1088] {}] datanode.BlockReceiver(316): Block 1073741902 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T04:30:10,446 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1675125938_22 at /127.0.0.1:60072 [Receiving block BP-1675406404-172.17.0.2-1732076970975:blk_1073741902_1088] {}] datanode.DataXceiver(331): 127.0.0.1:38957:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60072 dst: /127.0.0.1:38957 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
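The Thread-1053 entries above show the flush writer abandoning one block after another (blk_1073741900_1086, _1087, _1088) and excluding a dead datanode each time (36545, then 39517, then 46105) until a pipeline can be built from the surviving nodes (38957 and 39709, which succeed just below). That exclude-and-retry loop is internal to the HDFS DataStreamer; the sketch below is only a schematic of the pattern, with a hypothetical Connector interface standing in for the real socket setup, not the HDFS client code:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ExcludeAndRetrySketch {
  /** Hypothetical stand-in for opening a socket to a datanode; throws if unreachable. */
  interface Connector { void connect(String datanode) throws Exception; }

  /** Build a write pipeline from whatever candidates still accept connections. */
  static List<String> buildPipeline(List<String> candidates, Connector c, int width) {
    Set<String> excluded = new HashSet<>();
    while (excluded.size() < candidates.size()) {
      List<String> pipeline = new ArrayList<>();
      for (String node : candidates) {
        if (excluded.contains(node) || pipeline.size() == width) {
          continue;
        }
        try {
          c.connect(node);        // "Connection refused" ends up here
          pipeline.add(node);
        } catch (Exception e) {
          excluded.add(node);     // mirrors "Excluding datanode ..." in the log
        }
      }
      if (!pipeline.isEmpty()) {
        return pipeline;          // possibly shorter than requested: a degraded pipeline
      }
      // every node tried in this pass failed; the loop exits once all are excluded
    }
    throw new IllegalStateException("All datanodes are bad. Aborting...");
  }
}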
2024-11-20T04:30:10,446 WARN [Thread-1053 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK] 2024-11-20T04:30:10,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741903_1089 (size=5153) 2024-11-20T04:30:10,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38957 is added to blk_1073741903_1089 (size=5153) 2024-11-20T04:30:10,451 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/.tmp/ns/ceca5a08bd51462e8af1cc6b0106bf01 2024-11-20T04:30:10,474 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/.tmp/table/308a79e0cd9e4f1a8e7bc978af1722bf is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732076973789/Put/seqid=0 2024-11-20T04:30:10,476 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1090 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:10,476 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1675406404-172.17.0.2-1732076970975:blk_1073741904_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK], DatanodeInfoWithStorage[127.0.0.1:39517,DS-ae1bd8d8-75ee-476b-bf05-2b5216819790,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK]) is bad. 
2024-11-20T04:30:10,476 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-1675406404-172.17.0.2-1732076970975:blk_1073741904_1090 2024-11-20T04:30:10,477 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46105,DS-a98d078a-fcb5-4388-9bab-0e221e2a9ba5,DISK] 2024-11-20T04:30:10,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38957 is added to blk_1073741905_1091 (size=5424) 2024-11-20T04:30:10,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741905_1091 (size=5424) 2024-11-20T04:30:10,482 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/.tmp/table/308a79e0cd9e4f1a8e7bc978af1722bf 2024-11-20T04:30:10,488 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/.tmp/info/d4498cc4d9384962ba2657797a189d55 as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/info/d4498cc4d9384962ba2657797a189d55 2024-11-20T04:30:10,495 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/info/d4498cc4d9384962ba2657797a189d55, entries=10, sequenceid=11, filesize=6.9 K 2024-11-20T04:30:10,496 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/.tmp/ns/ceca5a08bd51462e8af1cc6b0106bf01 as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/ns/ceca5a08bd51462e8af1cc6b0106bf01 2024-11-20T04:30:10,501 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/ns/ceca5a08bd51462e8af1cc6b0106bf01, entries=2, sequenceid=11, filesize=5.0 K 2024-11-20T04:30:10,502 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/.tmp/table/308a79e0cd9e4f1a8e7bc978af1722bf as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/table/308a79e0cd9e4f1a8e7bc978af1722bf 2024-11-20T04:30:10,507 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/table/308a79e0cd9e4f1a8e7bc978af1722bf, entries=2, sequenceid=11, filesize=5.3 K 2024-11-20T04:30:10,509 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 154ms, sequenceid=11, compaction requested=false 2024-11-20T04:30:10,515 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-20T04:30:10,516 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T04:30:10,516 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T04:30:10,516 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732077010355Running coprocessor pre-close hooks at 1732077010355Disabling compacts and flushes for region at 1732077010355Disabling writes for close at 1732077010355Obtaining lock to block concurrent updates at 1732077010355Preparing flush snapshotting stores in 1588230740 at 1732077010355Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732077010356 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732077010383 (+27 ms)Flushing 1588230740/info: creating writer at 1732077010383Flushing 1588230740/info: appending metadata at 1732077010401 (+18 ms)Flushing 1588230740/info: closing flushed file at 1732077010401Flushing 1588230740/ns: creating writer at 1732077010421 (+20 ms)Flushing 1588230740/ns: appending metadata at 1732077010436 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732077010436Flushing 1588230740/table: creating writer at 1732077010458 (+22 ms)Flushing 1588230740/table: appending metadata at 1732077010474 (+16 ms)Flushing 1588230740/table: closing flushed file at 1732077010474Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@519cee67: reopening flushed file at 1732077010488 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1906afdb: reopening flushed file at 1732077010495 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32a75d64: reopening flushed file at 1732077010501 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 154ms, sequenceid=11, compaction requested=false at 1732077010509 (+8 ms)Writing region close event to WAL at 1732077010512 (+3 ms)Running coprocessor post-close hooks at 1732077010516 (+4 ms)Closed at 1732077010516 2024-11-20T04:30:10,517 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T04:30:10,518 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.1732076994225 to hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/oldWALs/c2a32e16c274%2C40241%2C1732076972119.1732076994225 2024-11-20T04:30:10,529 INFO [regionserver/c2a32e16c274:0.Chore.1 {}] 
hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T04:30:10,529 INFO [regionserver/c2a32e16c274:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T04:30:10,534 INFO [regionserver/c2a32e16c274:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:30:10,555 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(976): stopping server c2a32e16c274,40241,1732076972119; all regions closed. 2024-11-20T04:30:10,555 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:10,555 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:10,556 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:10,556 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:10,556 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:10,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38957 is added to blk_1073741896_1081 (size=825) 2024-11-20T04:30:10,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741896_1081 (size=825) 2024-11-20T04:30:10,715 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5257c09a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39709, datanodeUuid=daca8ad2-2090-44e3-8eba-acb1eefa44a3, infoPort=37513, infoSecurePort=0, ipcPort=46163, storageInfo=lv=-57;cid=testClusterID;nsid=91487624;c=1732076970975):Failed to transfer BP-1675406404-172.17.0.2-1732076970975:blk_1073741876_1059 to 127.0.0.1:46105 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
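The meta flush a little further above follows the usual two-step commit: each column family is first written under the region's .tmp directory (.tmp/info/d4498..., .tmp/ns/ceca5..., .tmp/table/308a7...) and only then moved into the live store directory by the HRegionFileSystem "Committing ... as ..." step, so a reader never observes a half-written HFile. A minimal sketch of that commit-by-rename pattern against a plain FileSystem (the regionDir/family/fileName arguments are illustrative, not the HBase API):

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlushCommitSketch {
  /** Move a flushed file from the family's .tmp area into the live store directory. */
  static Path commitFlushedFile(FileSystem fs, Path regionDir, String family, String fileName)
      throws Exception {
    Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), fileName);
    Path storeFile = new Path(new Path(regionDir, family), fileName);
    fs.mkdirs(storeFile.getParent());
    // The rename is the commit point: until it succeeds, readers only ever see
    // the previous store files, never a partially written HFile.
    if (!fs.rename(tmpFile, storeFile)) {
      throw new IllegalStateException("commit failed for " + tmpFile);
    }
    return storeFile;
  }
}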
2024-11-20T04:30:11,223 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-20T04:30:11,224 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T04:30:11,224 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T04:30:11,363 INFO [regionserver/c2a32e16c274:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:30:13,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741835_1011 (size=393) 2024-11-20T04:30:13,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:30:13,796 INFO [master/c2a32e16c274:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T04:30:13,796 INFO [master/c2a32e16c274:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-20T04:30:14,358 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 after 4002ms 2024-11-20T04:30:14,379 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta after 4002ms 2024-11-20T04:30:14,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:30:14,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:30:15,356 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-20T04:30:15,358 DEBUG [RS:1;c2a32e16c274:41341 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/oldWALs 2024-11-20T04:30:15,358 INFO [RS:1;c2a32e16c274:41341 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C41341%2C1732076973295:(num 1732076973502) 2024-11-20T04:30:15,358 DEBUG [RS:1;c2a32e16c274:41341 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:30:15,358 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:30:15,359 INFO [RS:1;c2a32e16c274:41341 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:30:15,359 INFO [RS:1;c2a32e16c274:41341 {}] hbase.ChoreService(370): Chore service for: regionserver/c2a32e16c274:0 had [ScheduledChore name=CompactionThroughputTuner, 
period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T04:30:15,359 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T04:30:15,359 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T04:30:15,359 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T04:30:15,359 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T04:30:15,359 INFO [RS:1;c2a32e16c274:41341 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:30:15,359 INFO [RS:1;c2a32e16c274:41341 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41341 2024-11-20T04:30:15,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10133a312670002, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c2a32e16c274,41341,1732076973295 2024-11-20T04:30:15,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:30:15,364 INFO [RS:1;c2a32e16c274:41341 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T04:30:15,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:15,365 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c2a32e16c274,41341,1732076973295] 2024-11-20T04:30:15,366 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c2a32e16c274,41341,1732076973295 already deleted, retry=false 2024-11-20T04:30:15,366 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c2a32e16c274,41341,1732076973295 expired; onlineServers=1 2024-11-20T04:30:15,422 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10133a312670002, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:30:15,466 INFO [RS:1;c2a32e16c274:41341 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T04:30:15,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10133a312670002, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:30:15,466 INFO [RS:1;c2a32e16c274:41341 {}] regionserver.HRegionServer(1031): Exiting; stopping=c2a32e16c274,41341,1732076973295; zookeeper connection closed. 2024-11-20T04:30:15,466 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@38e0629a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@38e0629a 2024-11-20T04:30:15,556 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-20T04:30:15,559 DEBUG [RS:0;c2a32e16c274:40241 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/oldWALs 2024-11-20T04:30:15,559 INFO [RS:0;c2a32e16c274:40241 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C40241%2C1732076972119.meta:.meta(num 1732077010356) 2024-11-20T04:30:15,560 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:15,560 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:15,560 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:15,560 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:15,560 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:15,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741891_1075 (size=14682) 2024-11-20T04:30:15,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38957 is added to blk_1073741891_1075 (size=14682) 2024-11-20T04:30:15,565 DEBUG [RS:0;c2a32e16c274:40241 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/oldWALs 2024-11-20T04:30:15,565 INFO [RS:0;c2a32e16c274:40241 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C40241%2C1732076972119:(num 1732077010101) 2024-11-20T04:30:15,565 DEBUG [RS:0;c2a32e16c274:40241 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:30:15,565 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:30:15,565 INFO [RS:0;c2a32e16c274:40241 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:30:15,565 INFO 
[RS:0;c2a32e16c274:40241 {}] hbase.ChoreService(370): Chore service for: regionserver/c2a32e16c274:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-20T04:30:15,565 INFO [RS:0;c2a32e16c274:40241 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:30:15,565 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T04:30:15,566 INFO [RS:0;c2a32e16c274:40241 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40241 2024-11-20T04:30:15,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c2a32e16c274,40241,1732076972119 2024-11-20T04:30:15,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:30:15,569 INFO [RS:0;c2a32e16c274:40241 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T04:30:15,570 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c2a32e16c274,40241,1732076972119] 2024-11-20T04:30:15,571 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c2a32e16c274,40241,1732076972119 already deleted, retry=false 2024-11-20T04:30:15,571 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c2a32e16c274,40241,1732076972119 expired; onlineServers=0 2024-11-20T04:30:15,571 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c2a32e16c274,42739,1732076972014' ***** 2024-11-20T04:30:15,571 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T04:30:15,571 INFO [M:0;c2a32e16c274:42739 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:30:15,571 INFO [M:0;c2a32e16c274:42739 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:30:15,571 DEBUG [M:0;c2a32e16c274:42739 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T04:30:15,571 DEBUG [M:0;c2a32e16c274:42739 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T04:30:15,571 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
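[editor's note] The ERROR a few entries above ("We have waited 5 seconds but the close of async writer doesn't complete... increase the wait time by the config \"hbase.wal.fshlog.wait.on.shutdown.seconds\"") names the knob to raise when the async WAL writer cannot close in time. A minimal sketch of setting it, assuming a harness that builds its own Configuration; the 30-second value and the standalone main() are illustrative assumptions, not taken from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Minimal sketch: raise the WAL shutdown wait named in the ERROR above.
// The 30-second value is an arbitrary illustration, not a recommendation from this log.
public class WalShutdownWaitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The run above used the 5-second default; give the async writer longer to close.
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
    System.out.println(conf.get("hbase.wal.fshlog.wait.on.shutdown.seconds"));
  }
}
```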
2024-11-20T04:30:15,572 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732076972440 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732076972440,5,FailOnTimeoutGroup] 2024-11-20T04:30:15,572 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732076972438 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732076972438,5,FailOnTimeoutGroup] 2024-11-20T04:30:15,572 INFO [M:0;c2a32e16c274:42739 {}] hbase.ChoreService(370): Chore service for: master/c2a32e16c274:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T04:30:15,572 INFO [M:0;c2a32e16c274:42739 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:30:15,572 DEBUG [M:0;c2a32e16c274:42739 {}] master.HMaster(1795): Stopping service threads 2024-11-20T04:30:15,572 INFO [M:0;c2a32e16c274:42739 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T04:30:15,572 INFO [M:0;c2a32e16c274:42739 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T04:30:15,572 INFO [M:0;c2a32e16c274:42739 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T04:30:15,572 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T04:30:15,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T04:30:15,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:15,573 DEBUG [M:0;c2a32e16c274:42739 {}] zookeeper.ZKUtil(347): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T04:30:15,573 WARN [M:0;c2a32e16c274:42739 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T04:30:15,574 INFO [M:0;c2a32e16c274:42739 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/.lastflushedseqids 2024-11-20T04:30:15,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741906_1092 (size=130) 2024-11-20T04:30:15,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38957 is added to blk_1073741906_1092 (size=130) 2024-11-20T04:30:15,580 INFO [M:0;c2a32e16c274:42739 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T04:30:15,580 INFO [M:0;c2a32e16c274:42739 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T04:30:15,580 DEBUG [M:0;c2a32e16c274:42739 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T04:30:15,580 INFO [M:0;c2a32e16c274:42739 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:30:15,580 DEBUG [M:0;c2a32e16c274:42739 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:30:15,580 DEBUG [M:0;c2a32e16c274:42739 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T04:30:15,580 DEBUG [M:0;c2a32e16c274:42739 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:30:15,581 INFO [M:0;c2a32e16c274:42739 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-20T04:30:15,597 DEBUG [M:0;c2a32e16c274:42739 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8960585e31864c61b963e2bceea39d56 is 82, key is hbase:meta,,1/info:regioninfo/1732076973197/Put/seqid=0 2024-11-20T04:30:15,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38957 is added to blk_1073741907_1093 (size=5672) 2024-11-20T04:30:15,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741907_1093 (size=5672) 2024-11-20T04:30:15,603 INFO [M:0;c2a32e16c274:42739 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8960585e31864c61b963e2bceea39d56 2024-11-20T04:30:15,623 DEBUG [M:0;c2a32e16c274:42739 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b137a20dea334bc38a5d30f8515f8d33 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732076973795/Put/seqid=0 2024-11-20T04:30:15,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38957 is added to blk_1073741908_1094 (size=6255) 2024-11-20T04:30:15,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741908_1094 (size=6255) 2024-11-20T04:30:15,629 INFO [M:0;c2a32e16c274:42739 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b137a20dea334bc38a5d30f8515f8d33 2024-11-20T04:30:15,634 INFO [M:0;c2a32e16c274:42739 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b137a20dea334bc38a5d30f8515f8d33 2024-11-20T04:30:15,649 DEBUG [M:0;c2a32e16c274:42739 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e4c40b35c0014baba2ec248434f2fee1 is 69, key is c2a32e16c274,40241,1732076972119/rs:state/1732076972498/Put/seqid=0 2024-11-20T04:30:15,654 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38957 is added to blk_1073741909_1095 (size=5224) 2024-11-20T04:30:15,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741909_1095 (size=5224) 2024-11-20T04:30:15,655 INFO [M:0;c2a32e16c274:42739 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e4c40b35c0014baba2ec248434f2fee1 2024-11-20T04:30:15,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:30:15,670 INFO [RS:0;c2a32e16c274:40241 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T04:30:15,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40241-0x10133a312670001, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:30:15,670 INFO [RS:0;c2a32e16c274:40241 {}] regionserver.HRegionServer(1031): Exiting; stopping=c2a32e16c274,40241,1732076972119; zookeeper connection closed. 2024-11-20T04:30:15,670 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1fad56a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1fad56a 2024-11-20T04:30:15,671 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-20T04:30:15,675 DEBUG [M:0;c2a32e16c274:42739 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7974abf7e82c4ca59dc48097d52d6759 is 52, key is load_balancer_on/state:d/1732076973272/Put/seqid=0 2024-11-20T04:30:15,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741910_1096 (size=5056) 2024-11-20T04:30:15,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38957 is added to blk_1073741910_1096 (size=5056) 2024-11-20T04:30:15,680 INFO [M:0;c2a32e16c274:42739 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7974abf7e82c4ca59dc48097d52d6759 2024-11-20T04:30:15,686 DEBUG [M:0;c2a32e16c274:42739 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8960585e31864c61b963e2bceea39d56 as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8960585e31864c61b963e2bceea39d56 2024-11-20T04:30:15,691 INFO [M:0;c2a32e16c274:42739 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8960585e31864c61b963e2bceea39d56, entries=8, sequenceid=60, filesize=5.5 K 2024-11-20T04:30:15,692 DEBUG [M:0;c2a32e16c274:42739 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b137a20dea334bc38a5d30f8515f8d33 as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b137a20dea334bc38a5d30f8515f8d33 2024-11-20T04:30:15,696 INFO [M:0;c2a32e16c274:42739 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b137a20dea334bc38a5d30f8515f8d33 2024-11-20T04:30:15,696 INFO [M:0;c2a32e16c274:42739 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b137a20dea334bc38a5d30f8515f8d33, entries=6, sequenceid=60, filesize=6.1 K 2024-11-20T04:30:15,697 DEBUG [M:0;c2a32e16c274:42739 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e4c40b35c0014baba2ec248434f2fee1 as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e4c40b35c0014baba2ec248434f2fee1 2024-11-20T04:30:15,702 INFO [M:0;c2a32e16c274:42739 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e4c40b35c0014baba2ec248434f2fee1, entries=2, sequenceid=60, filesize=5.1 K 2024-11-20T04:30:15,703 DEBUG [M:0;c2a32e16c274:42739 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7974abf7e82c4ca59dc48097d52d6759 as hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7974abf7e82c4ca59dc48097d52d6759 2024-11-20T04:30:15,708 INFO [M:0;c2a32e16c274:42739 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7974abf7e82c4ca59dc48097d52d6759, entries=1, sequenceid=60, filesize=4.9 K 2024-11-20T04:30:15,709 INFO [M:0;c2a32e16c274:42739 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=60, compaction requested=false 2024-11-20T04:30:15,711 INFO [M:0;c2a32e16c274:42739 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
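[editor's note] The DEBUG "Committing ... .tmp/... as ..." entries above record flushed HFiles being moved from the store's .tmp directory into the column-family directory. The sketch below only illustrates that rename-into-place step with plain FileSystem calls against paths copied from the log; it is not HBase's HRegionFileSystem code, and running it requires the (long gone) test NameNode at hdfs://localhost:40051.

```java
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch of the ".tmp -> family dir" commit step the DEBUG lines describe.
// Paths and the NameNode address are copied from the log for illustration only.
public class CommitFlushedFileExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40051"), conf);
    Path tmp = new Path("/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/"
        + "MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/"
        + "8960585e31864c61b963e2bceea39d56");
    Path dst = new Path("/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/"
        + "MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/"
        + "8960585e31864c61b963e2bceea39d56");
    // A flush writes under .tmp first, then renames into the family directory,
    // so readers never observe a partially written HFile.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}
```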
2024-11-20T04:30:15,711 DEBUG [M:0;c2a32e16c274:42739 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732077015580Disabling compacts and flushes for region at 1732077015580Disabling writes for close at 1732077015580Obtaining lock to block concurrent updates at 1732077015581 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732077015581Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732077015581Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732077015582 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732077015582Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732077015597 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732077015597Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732077015608 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732077015623 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732077015623Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732077015635 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732077015649 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732077015649Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732077015660 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732077015674 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732077015674Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2aae7d2c: reopening flushed file at 1732077015685 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4dab1bea: reopening flushed file at 1732077015691 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71fee36c: reopening flushed file at 1732077015696 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b6d3a6c: reopening flushed file at 1732077015702 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=60, compaction requested=false at 1732077015709 (+7 ms)Writing region close event to WAL at 1732077015711 (+2 ms)Closed at 1732077015711 2024-11-20T04:30:15,712 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:15,712 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:15,712 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:15,712 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:15,712 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:15,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38957 is added to blk_1073741889_1072 (size=1045) 2024-11-20T04:30:15,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741889_1072 (size=1045) 2024-11-20T04:30:15,947 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T04:30:15,961 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:15,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:16,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:16,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:30:16,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:30:16,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39709 is added to blk_1073741836_1012 (size=76) 2024-11-20T04:30:16,601 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@126da0c0 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1675406404-172.17.0.2-1732076970975:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:36545,null,null]) java.net.ConnectException: Call From c2a32e16c274/172.17.0.2 to localhost:38571 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-20T04:30:17,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:17,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:17,457 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/WALs/c2a32e16c274,42739,1732076972014/c2a32e16c274%2C42739%2C1732076972014.1732076972283 to hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/oldWALs/c2a32e16c274%2C42739%2C1732076972014.1732076972283 2024-11-20T04:30:17,461 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/MasterData/oldWALs/c2a32e16c274%2C42739%2C1732076972014.1732076972283 to hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/oldWALs/c2a32e16c274%2C42739%2C1732076972014.1732076972283$masterlocalwal$ 2024-11-20T04:30:17,461 INFO [M:0;c2a32e16c274:42739 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T04:30:17,461 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
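[editor's note] The repeated Close-WAL-Writer WARNs above print "java.lang.reflect.InvocationTargetException: null" because RecoverLeaseFSUtils invokes isFileClosed reflectively: the reflective wrapper itself carries no message, and the real failure (FileNotFoundException earlier, "Filesystem closed" here) travels as its cause. A small sketch of that call pattern, assuming an already-open DistributedFileSystem; it mimics the shape of the call, it is not the RecoverLeaseFSUtils source.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of the reflective isFileClosed call behind the WARNs above.
// 'dfs' is assumed to be an open DistributedFileSystem; obtaining one is omitted.
public class ReflectiveIsFileClosedExample {
  static boolean isFileClosed(DistributedFileSystem dfs, Path wal) {
    try {
      Method m = dfs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(dfs, wal);
    } catch (InvocationTargetException e) {
      // The wrapper's own message is null, hence "InvocationTargetException: null"
      // in the log; the underlying IOException is attached as the cause.
      System.err.println("isFileClosed failed: " + e.getCause());
      return false;
    } catch (ReflectiveOperationException e) {
      return false;
    }
  }
}
```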
2024-11-20T04:30:17,461 INFO [M:0;c2a32e16c274:42739 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42739 2024-11-20T04:30:17,461 INFO [M:0;c2a32e16c274:42739 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T04:30:17,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:30:17,563 INFO [M:0;c2a32e16c274:42739 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T04:30:17,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42739-0x10133a312670000, quorum=127.0.0.1:53242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:30:17,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2edfde78{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:30:17,566 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9536d35{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:30:17,566 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:30:17,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17162101{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:30:17,567 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d6ec757{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir/,STOPPED} 2024-11-20T04:30:17,568 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T04:30:17,568 WARN [BP-1675406404-172.17.0.2-1732076970975 heartbeating to localhost/127.0.0.1:40051 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:30:17,568 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:30:17,567 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6a6efbff {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1675406404-172.17.0.2-1732076970975:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:36545,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:38571 , LocalHost:localPort c2a32e16c274/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-20T04:30:17,568 WARN [BP-1675406404-172.17.0.2-1732076970975 heartbeating to localhost/127.0.0.1:40051 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1675406404-172.17.0.2-1732076970975 (Datanode Uuid 83959f4a-849d-48e1-94ea-6cb816411dc3) service to localhost/127.0.0.1:40051 2024-11-20T04:30:17,569 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data3/current/BP-1675406404-172.17.0.2-1732076970975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:30:17,569 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data4/current/BP-1675406404-172.17.0.2-1732076970975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:30:17,569 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6a6efbff {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1675406404-172.17.0.2-1732076970975:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:36545,null,null], DatanodeInfoWithStorage[127.0.0.1:38957,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1675406404-172.17.0.2-1732076970975 2024-11-20T04:30:17,569 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6a6efbff {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1675406404-172.17.0.2-1732076970975:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:38957,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1675406404-172.17.0.2-1732076970975 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:30:17,569 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:30:17,569 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6a6efbff {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1675406404-172.17.0.2-1732076970975:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:36545,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1675406404-172.17.0.2-1732076970975 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:30:17,570 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6a6efbff {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1675406404-172.17.0.2-1732076970975:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:38957,null,null], DatanodeInfoWithStorage[127.0.0.1:36545,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1675406404-172.17.0.2-1732076970975:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:38957,null,null], DatanodeInfoWithStorage[127.0.0.1:36545,null,null]] 2024-11-20T04:30:17,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b5be5aa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:30:17,573 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f7383da{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:30:17,573 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:30:17,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@167a7fde{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:30:17,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@519de6b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir/,STOPPED} 2024-11-20T04:30:17,574 WARN [BP-1675406404-172.17.0.2-1732076970975 heartbeating to localhost/127.0.0.1:40051 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:30:17,574 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
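[editor's note] The Close-WAL-Writer warnings throughout this shutdown come from WAL lease recovery racing the teardown: the file has already been archived or the DFS client closed by the time the recovery loop polls it. A minimal sketch of the HDFS-level loop such recovery relies on (recoverLease, then poll isFileClosed), assuming an open DistributedFileSystem; the 1-second poll and 60-second cap are illustrative assumptions, and this is not the RecoverLeaseFSUtils implementation.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of the HDFS-level lease recovery loop that the Close-WAL-Writer
// warnings above originate from. Timeouts here are illustrative only.
public class WalLeaseRecoverySketch {
  static boolean recoverWalLease(DistributedFileSystem dfs, Path wal) throws Exception {
    if (dfs.recoverLease(wal)) {
      return true;                       // lease released immediately
    }
    long deadline = System.currentTimeMillis() + 60_000L;
    while (System.currentTimeMillis() < deadline) {
      Thread.sleep(1_000L);
      if (dfs.isFileClosed(wal)) {       // the call the reflective WARNs wrap
        return true;
      }
    }
    return false;                        // caller decides whether to retry or give up
  }
}
```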
2024-11-20T04:30:17,574 WARN [BP-1675406404-172.17.0.2-1732076970975 heartbeating to localhost/127.0.0.1:40051 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1675406404-172.17.0.2-1732076970975 (Datanode Uuid daca8ad2-2090-44e3-8eba-acb1eefa44a3) service to localhost/127.0.0.1:40051 2024-11-20T04:30:17,574 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:30:17,575 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data5/current/BP-1675406404-172.17.0.2-1732076970975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:30:17,575 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/cluster_38e6ef2d-9f2a-a04e-1c17-b003fec73818/data/data6/current/BP-1675406404-172.17.0.2-1732076970975 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:30:17,575 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:30:17,583 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@56ff2226{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T04:30:17,584 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b5c1b0d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:30:17,584 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:30:17,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5d8a9c69{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:30:17,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35c95cb4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir/,STOPPED} 2024-11-20T04:30:17,592 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T04:30:17,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T04:30:17,631 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=153 (was 78) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:46841 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40051 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:40051 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40051 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40051 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40051 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40051 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40051 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:40051 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46841 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f0a34bef390.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40051 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40051 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40051 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f0a34bef390.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 405) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=210 (was 205) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7567 (was 8160) 2024-11-20T04:30:17,639 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=153, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=210, ProcessCount=11, AvailableMemoryMB=7566 2024-11-20T04:30:17,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T04:30:17,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.log.dir so I do NOT create it in target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c 2024-11-20T04:30:17,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7413a35e-0ea0-7523-9cbf-2c1fb3bad653/hadoop.tmp.dir so I do NOT create it in target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c 2024-11-20T04:30:17,640 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493, deleteOnExit=true 2024-11-20T04:30:17,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T04:30:17,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/test.cache.data in system properties and HBase conf 2024-11-20T04:30:17,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T04:30:17,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir in system properties and HBase conf 2024-11-20T04:30:17,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T04:30:17,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T04:30:17,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T04:30:17,640 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-20T04:30:17,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T04:30:17,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T04:30:17,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T04:30:17,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T04:30:17,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T04:30:17,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T04:30:17,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T04:30:17,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T04:30:17,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T04:30:17,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/nfs.dump.dir in system properties and HBase conf 2024-11-20T04:30:17,642 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/java.io.tmpdir in system properties and HBase conf 2024-11-20T04:30:17,642 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T04:30:17,642 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T04:30:17,642 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T04:30:17,656 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T04:30:17,732 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:30:17,736 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:30:17,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:30:17,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:30:17,741 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T04:30:17,742 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:30:17,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@444db7a8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:30:17,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77037455{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:30:17,858 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3fab170c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/java.io.tmpdir/jetty-localhost-39289-hadoop-hdfs-3_4_1-tests_jar-_-any-10173701454194471662/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T04:30:17,859 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d2191b9{HTTP/1.1, (http/1.1)}{localhost:39289} 2024-11-20T04:30:17,859 INFO [Time-limited test {}] server.Server(415): Started @151937ms 2024-11-20T04:30:17,873 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T04:30:17,954 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:30:17,957 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:30:17,958 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:30:17,958 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:30:17,958 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:30:17,959 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@232fa1ae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:30:17,959 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e63449e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:30:18,073 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7091f2a1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/java.io.tmpdir/jetty-localhost-43457-hadoop-hdfs-3_4_1-tests_jar-_-any-3043607005672116454/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:30:18,074 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4360f0f4{HTTP/1.1, (http/1.1)}{localhost:43457} 2024-11-20T04:30:18,074 INFO [Time-limited test {}] server.Server(415): Started @152153ms 2024-11-20T04:30:18,075 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T04:30:18,120 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:30:18,124 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:30:18,126 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:30:18,126 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:30:18,126 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:30:18,129 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@393a832c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:30:18,129 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51031d29{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:30:18,194 WARN [Thread-1185 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data1/current/BP-524018934-172.17.0.2-1732077017674/current, will proceed with Du for space computation calculation, 2024-11-20T04:30:18,194 WARN [Thread-1186 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data2/current/BP-524018934-172.17.0.2-1732077017674/current, will proceed with Du for space computation calculation, 2024-11-20T04:30:18,217 WARN [Thread-1164 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T04:30:18,220 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd439b5f67e9ae01f with lease ID 0x9e188147bdb89f09: Processing first storage report for DS-78289be3-b11f-4030-9499-2012da7b3c24 from datanode DatanodeRegistration(127.0.0.1:38659, datanodeUuid=c4a40280-f54d-4d23-9f7a-e14c3e8341a9, infoPort=34289, infoSecurePort=0, ipcPort=42217, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674) 2024-11-20T04:30:18,220 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd439b5f67e9ae01f with lease ID 0x9e188147bdb89f09: from storage DS-78289be3-b11f-4030-9499-2012da7b3c24 node DatanodeRegistration(127.0.0.1:38659, datanodeUuid=c4a40280-f54d-4d23-9f7a-e14c3e8341a9, infoPort=34289, infoSecurePort=0, ipcPort=42217, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:18,220 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd439b5f67e9ae01f with lease ID 0x9e188147bdb89f09: Processing first storage report for DS-a37f9a57-2e28-4ab8-824e-73edccf42b1e from datanode DatanodeRegistration(127.0.0.1:38659, datanodeUuid=c4a40280-f54d-4d23-9f7a-e14c3e8341a9, infoPort=34289, infoSecurePort=0, ipcPort=42217, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674) 2024-11-20T04:30:18,220 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd439b5f67e9ae01f with lease ID 0x9e188147bdb89f09: from storage DS-a37f9a57-2e28-4ab8-824e-73edccf42b1e node DatanodeRegistration(127.0.0.1:38659, datanodeUuid=c4a40280-f54d-4d23-9f7a-e14c3e8341a9, infoPort=34289, infoSecurePort=0, ipcPort=42217, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:18,256 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78745d04{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/java.io.tmpdir/jetty-localhost-36201-hadoop-hdfs-3_4_1-tests_jar-_-any-12182790158758063970/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:30:18,256 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ea5a7a{HTTP/1.1, (http/1.1)}{localhost:36201} 2024-11-20T04:30:18,256 INFO [Time-limited test {}] server.Server(415): Started @152335ms 2024-11-20T04:30:18,258 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
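[Editor's note] The records from 04:30:17,639 onward show HBaseTestingUtil bringing up a fresh minicluster for testLogRollOnPipelineRestart: one ZK server, a two-datanode MiniDFSCluster (the BlockPoolSlice and block-report records above), and one master plus one region server. A rough sketch of what that startup looks like from test code, assuming the builder-style StartMiniClusterOption API implied by the logged option string (the exact builder method names are an assumption, not verified against this branch):

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the logged option: 1 master, 1 region server, 2 datanodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .build();
    util.startMiniCluster(option); // MiniZK first, then MiniDFS, then HBase itself
    try {
      // ... exercise WAL rolling against the running minicluster ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```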
2024-11-20T04:30:18,359 WARN [Thread-1212 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data4/current/BP-524018934-172.17.0.2-1732077017674/current, will proceed with Du for space computation calculation, 2024-11-20T04:30:18,360 WARN [Thread-1211 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data3/current/BP-524018934-172.17.0.2-1732077017674/current, will proceed with Du for space computation calculation, 2024-11-20T04:30:18,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:18,381 WARN [Thread-1200 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T04:30:18,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:30:18,385 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x31a62da80ed82d41 with lease ID 0x9e188147bdb89f0a: Processing first storage report for DS-664ef650-2b74-4030-b463-59a5d0dce02b from datanode DatanodeRegistration(127.0.0.1:39853, datanodeUuid=e29a8ceb-33a1-47a2-aa99-54fe9f459a9f, infoPort=33145, infoSecurePort=0, ipcPort=32989, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674) 2024-11-20T04:30:18,385 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x31a62da80ed82d41 with lease ID 0x9e188147bdb89f0a: from storage DS-664ef650-2b74-4030-b463-59a5d0dce02b node DatanodeRegistration(127.0.0.1:39853, datanodeUuid=e29a8ceb-33a1-47a2-aa99-54fe9f459a9f, infoPort=33145, infoSecurePort=0, ipcPort=32989, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:18,385 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x31a62da80ed82d41 with lease ID 0x9e188147bdb89f0a: Processing first storage report for DS-a5d39f6e-9ff1-4c1a-8396-558d53f578a9 from datanode DatanodeRegistration(127.0.0.1:39853, datanodeUuid=e29a8ceb-33a1-47a2-aa99-54fe9f459a9f, infoPort=33145, infoSecurePort=0, ipcPort=32989, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674) 2024-11-20T04:30:18,385 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x31a62da80ed82d41 with lease ID 0x9e188147bdb89f0a: from storage DS-a5d39f6e-9ff1-4c1a-8396-558d53f578a9 node DatanodeRegistration(127.0.0.1:39853, datanodeUuid=e29a8ceb-33a1-47a2-aa99-54fe9f459a9f, infoPort=33145, infoSecurePort=0, ipcPort=32989, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:18,486 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c 2024-11-20T04:30:18,489 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/zookeeper_0, clientPort=49423, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T04:30:18,490 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49423 2024-11-20T04:30:18,490 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:18,492 INFO [Time-limited test 
{}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:18,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39853 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:30:18,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38659 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:30:18,503 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4 with version=8 2024-11-20T04:30:18,503 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/hbase-staging 2024-11-20T04:30:18,505 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:30:18,505 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:30:18,505 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:30:18,506 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:30:18,506 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:30:18,506 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:30:18,506 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T04:30:18,506 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:30:18,506 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39381 2024-11-20T04:30:18,508 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39381 connecting to ZooKeeper ensemble=127.0.0.1:49423 2024-11-20T04:30:18,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:393810x0, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:30:18,518 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39381-0x10133a3c8080000 connected 2024-11-20T04:30:18,541 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:18,542 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:18,545 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:30:18,545 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4, hbase.cluster.distributed=false 2024-11-20T04:30:18,547 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:30:18,547 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39381 2024-11-20T04:30:18,548 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39381 2024-11-20T04:30:18,548 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39381 2024-11-20T04:30:18,551 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39381 2024-11-20T04:30:18,551 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39381 2024-11-20T04:30:18,567 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:30:18,567 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:30:18,567 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:30:18,567 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:30:18,567 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:30:18,567 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:30:18,567 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T04:30:18,567 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:30:18,568 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41203 2024-11-20T04:30:18,569 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41203 connecting to ZooKeeper ensemble=127.0.0.1:49423 2024-11-20T04:30:18,570 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:18,572 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:18,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412030x0, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:30:18,576 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:412030x0, quorum=127.0.0.1:49423, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:30:18,576 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41203-0x10133a3c8080001 connected 2024-11-20T04:30:18,576 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T04:30:18,577 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T04:30:18,578 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T04:30:18,579 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:30:18,579 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41203 2024-11-20T04:30:18,580 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41203 2024-11-20T04:30:18,580 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41203 2024-11-20T04:30:18,581 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41203 2024-11-20T04:30:18,581 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41203 2024-11-20T04:30:18,598 DEBUG [M:0;c2a32e16c274:39381 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c2a32e16c274:39381 2024-11-20T04:30:18,598 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c2a32e16c274,39381,1732077018505 2024-11-20T04:30:18,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:30:18,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:30:18,602 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/c2a32e16c274,39381,1732077018505 2024-11-20T04:30:18,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T04:30:18,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:18,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:18,606 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T04:30:18,607 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c2a32e16c274,39381,1732077018505 from backup master directory 2024-11-20T04:30:18,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c2a32e16c274,39381,1732077018505 2024-11-20T04:30:18,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:30:18,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:30:18,608 WARN [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
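
The ZKWatcher lines above show watches being set on znodes such as /hbase/master before they exist, with NodeCreated and NodeChildrenChanged events arriving once the master registers itself. The sketch below reproduces that pattern with the plain ZooKeeper client against the mini ensemble on 127.0.0.1:49423 from this log; the class name, session timeout and latch are illustrative assumptions, not code from the test.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatchExample {
      public static void main(String[] args) throws Exception {
        CountDownLatch created = new CountDownLatch(1);
        // Connect to the MiniZooKeeperCluster client port reported earlier in the log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49423", 30000, event -> { });
        // Like ZKUtil above, set a watch on a znode that does not yet exist;
        // ZooKeeper delivers a NodeCreated event once /hbase/master is registered.
        zk.exists("/hbase/master", event -> {
          if (event.getType() == Watcher.Event.EventType.NodeCreated) {
            created.countDown();
          }
        });
        created.await();
        zk.close();
      }
    }
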
2024-11-20T04:30:18,608 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c2a32e16c274,39381,1732077018505 2024-11-20T04:30:18,613 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/hbase.id] with ID: 3e511e8f-12b3-41ee-ac40-3479f67c7b90 2024-11-20T04:30:18,614 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/.tmp/hbase.id 2024-11-20T04:30:18,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38659 is added to blk_1073741826_1002 (size=42) 2024-11-20T04:30:18,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39853 is added to blk_1073741826_1002 (size=42) 2024-11-20T04:30:18,625 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/.tmp/hbase.id]:[hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/hbase.id] 2024-11-20T04:30:18,638 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:18,638 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T04:30:18,640 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
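
The cluster ID sequence above (write hbase.id to a temporary .tmp location, then move it to its final path) is the usual write-then-rename pattern for publishing a small file atomically on HDFS. A minimal sketch of that pattern with the Hadoop FileSystem API follows; the paths under /user/jenkins/example are hypothetical, and only the NameNode port 39703 is taken from the log.

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:39703"); // NameNode from this log
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/user/jenkins/example/.tmp/hbase.id"); // hypothetical path
        Path target = new Path("/user/jenkins/example/hbase.id");   // hypothetical path
        // 1. Write the ID to a temporary location.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        // 2. Move it to the target location; readers only ever see a complete file.
        fs.rename(tmp, target);
        fs.close();
      }
    }
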
2024-11-20T04:30:18,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:18,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:18,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38659 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:30:18,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39853 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:30:18,664 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T04:30:18,665 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T04:30:18,666 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:30:18,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38659 is added to blk_1073741828_1004 (size=1189) 2024-11-20T04:30:18,682 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE 
=> 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store 2024-11-20T04:30:18,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39853 is added to blk_1073741828_1004 (size=1189) 2024-11-20T04:30:18,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38659 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:30:18,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39853 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:30:18,695 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:30:18,695 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T04:30:18,695 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:30:18,695 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:30:18,695 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T04:30:18,695 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:30:18,695 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
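
The master:store descriptor printed above (an in-memory 'info' family with ROW_INDEX_V1 encoding, ROWCOL bloom filter, three versions and an 8 KB block size, plus plain 'proc', 'rs' and 'state' families with one version, ROW bloom and 64 KB blocks) can be expressed with the public HBase client builders. The sketch below constructs an equivalent descriptor for a hypothetical table named 'demo'; it only illustrates the settings visible in the log and is not how MasterRegion itself builds the descriptor.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorExample {
      public static void main(String[] args) {
        TableDescriptor demo = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          // 'info' family as logged: 3 versions, ROW_INDEX_V1, ROWCOL bloom, in-memory, 8 KB blocks.
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build())
          // 'proc', 'rs' and 'state' as logged: 1 version, ROW bloom, 64 KB blocks.
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(65536).build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(65536).build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(65536).build())
          .build();
        System.out.println(demo);
      }
    }
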
2024-11-20T04:30:18,695 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732077018695Disabling compacts and flushes for region at 1732077018695Disabling writes for close at 1732077018695Writing region close event to WAL at 1732077018695Closed at 1732077018695 2024-11-20T04:30:18,696 WARN [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/.initializing 2024-11-20T04:30:18,696 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/WALs/c2a32e16c274,39381,1732077018505 2024-11-20T04:30:18,700 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C39381%2C1732077018505, suffix=, logDir=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/WALs/c2a32e16c274,39381,1732077018505, archiveDir=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/oldWALs, maxLogs=10 2024-11-20T04:30:18,701 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C39381%2C1732077018505.1732077018700 2024-11-20T04:30:18,720 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/WALs/c2a32e16c274,39381,1732077018505/c2a32e16c274%2C39381%2C1732077018505.1732077018700 2024-11-20T04:30:18,727 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34289:34289),(127.0.0.1/127.0.0.1:33145:33145)] 2024-11-20T04:30:18,732 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:30:18,733 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:30:18,733 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:18,733 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:18,736 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:18,738 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T04:30:18,739 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:18,739 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:18,739 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:18,741 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T04:30:18,741 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:18,742 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:30:18,742 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:18,744 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T04:30:18,744 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:18,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:30:18,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:18,746 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T04:30:18,746 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:18,747 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:30:18,747 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:18,748 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:18,748 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:18,750 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:18,750 DEBUG [master/c2a32e16c274:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:18,750 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T04:30:18,752 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:18,757 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:30:18,757 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=839890, jitterRate=0.067975714802742}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T04:30:18,758 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732077018733Initializing all the Stores at 1732077018734 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077018734Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077018736 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077018736Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077018736Cleaning up temporary data from old regions at 1732077018750 (+14 ms)Region opened successfully at 1732077018758 (+8 ms) 2024-11-20T04:30:18,758 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T04:30:18,762 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c6593c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:30:18,763 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T04:30:18,763 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T04:30:18,764 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T04:30:18,764 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T04:30:18,764 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T04:30:18,765 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T04:30:18,765 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T04:30:18,768 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T04:30:18,769 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T04:30:18,771 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T04:30:18,771 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T04:30:18,772 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T04:30:18,774 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T04:30:18,774 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T04:30:18,779 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T04:30:18,780 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T04:30:18,784 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T04:30:18,785 DEBUG 
[master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T04:30:18,788 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T04:30:18,791 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T04:30:18,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T04:30:18,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T04:30:18,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:18,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:18,797 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c2a32e16c274,39381,1732077018505, sessionid=0x10133a3c8080000, setting cluster-up flag (Was=false) 2024-11-20T04:30:18,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:18,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:18,805 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T04:30:18,807 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2a32e16c274,39381,1732077018505 2024-11-20T04:30:18,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:18,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:18,816 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T04:30:18,817 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2a32e16c274,39381,1732077018505 2024-11-20T04:30:18,819 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T04:30:18,821 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T04:30:18,821 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T04:30:18,821 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T04:30:18,821 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c2a32e16c274,39381,1732077018505 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T04:30:18,823 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:30:18,823 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:30:18,823 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:30:18,823 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:30:18,823 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c2a32e16c274:0, corePoolSize=10, maxPoolSize=10 2024-11-20T04:30:18,823 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:18,823 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:30:18,823 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c2a32e16c274:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T04:30:18,826 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732077048826 2024-11-20T04:30:18,826 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:30:18,826 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T04:30:18,826 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T04:30:18,826 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T04:30:18,826 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T04:30:18,826 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T04:30:18,826 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T04:30:18,826 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T04:30:18,827 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:18,827 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T04:30:18,828 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:18,829 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T04:30:18,829 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T04:30:18,829 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T04:30:18,830 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T04:30:18,830 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T04:30:18,830 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732077018830,5,FailOnTimeoutGroup] 2024-11-20T04:30:18,830 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732077018830,5,FailOnTimeoutGroup] 2024-11-20T04:30:18,831 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:18,831 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T04:30:18,831 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:18,831 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-20T04:30:18,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39853 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:30:18,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38659 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:30:18,842 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T04:30:18,842 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4 2024-11-20T04:30:18,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38659 is added to blk_1073741832_1008 (size=32) 2024-11-20T04:30:18,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39853 is added to blk_1073741832_1008 (size=32) 2024-11-20T04:30:18,855 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:30:18,856 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T04:30:18,858 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T04:30:18,858 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:18,858 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:18,859 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T04:30:18,860 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T04:30:18,860 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:18,861 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:18,861 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T04:30:18,862 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T04:30:18,862 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:18,862 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:18,862 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T04:30:18,863 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T04:30:18,863 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:18,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:18,864 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T04:30:18,865 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740 2024-11-20T04:30:18,865 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740 2024-11-20T04:30:18,868 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T04:30:18,868 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T04:30:18,868 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T04:30:18,870 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T04:30:18,872 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:30:18,873 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=785909, jitterRate=-6.651729345321655E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T04:30:18,874 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732077018855Initializing all the Stores at 1732077018856 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077018856Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077018856Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077018856Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077018856Cleaning up temporary data from old regions at 1732077018868 (+12 ms)Region opened successfully at 1732077018874 (+6 ms) 2024-11-20T04:30:18,874 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T04:30:18,874 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T04:30:18,874 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T04:30:18,874 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T04:30:18,874 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T04:30:18,875 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T04:30:18,875 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732077018874Disabling compacts and flushes for region at 1732077018874Disabling writes for close at 1732077018874Writing region 
close event to WAL at 1732077018875 (+1 ms)Closed at 1732077018875 2024-11-20T04:30:18,876 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:30:18,876 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T04:30:18,877 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T04:30:18,878 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T04:30:18,879 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T04:30:18,883 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(746): ClusterId : 3e511e8f-12b3-41ee-ac40-3479f67c7b90 2024-11-20T04:30:18,884 DEBUG [RS:0;c2a32e16c274:41203 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T04:30:18,887 DEBUG [RS:0;c2a32e16c274:41203 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T04:30:18,887 DEBUG [RS:0;c2a32e16c274:41203 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T04:30:18,890 DEBUG [RS:0;c2a32e16c274:41203 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T04:30:18,891 DEBUG [RS:0;c2a32e16c274:41203 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bafed9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:30:18,908 DEBUG [RS:0;c2a32e16c274:41203 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c2a32e16c274:41203 2024-11-20T04:30:18,909 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T04:30:18,909 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T04:30:18,909 DEBUG [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-20T04:30:18,910 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(2659): reportForDuty to master=c2a32e16c274,39381,1732077018505 with port=41203, startcode=1732077018566 2024-11-20T04:30:18,910 DEBUG [RS:0;c2a32e16c274:41203 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T04:30:18,912 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48171, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T04:30:18,913 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c2a32e16c274,41203,1732077018566 2024-11-20T04:30:18,913 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39381 {}] master.ServerManager(517): Registering regionserver=c2a32e16c274,41203,1732077018566 2024-11-20T04:30:18,915 DEBUG [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4 2024-11-20T04:30:18,915 DEBUG [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39703 2024-11-20T04:30:18,915 DEBUG [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T04:30:18,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:30:18,917 DEBUG [RS:0;c2a32e16c274:41203 {}] zookeeper.ZKUtil(111): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c2a32e16c274,41203,1732077018566 2024-11-20T04:30:18,917 WARN [RS:0;c2a32e16c274:41203 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T04:30:18,917 INFO [RS:0;c2a32e16c274:41203 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:30:18,918 DEBUG [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566 2024-11-20T04:30:18,918 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c2a32e16c274,41203,1732077018566] 2024-11-20T04:30:18,922 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T04:30:18,924 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T04:30:18,925 INFO [RS:0;c2a32e16c274:41203 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T04:30:18,925 INFO [RS:0;c2a32e16c274:41203 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-20T04:30:18,926 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T04:30:18,927 INFO [RS:0;c2a32e16c274:41203 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T04:30:18,927 INFO [RS:0;c2a32e16c274:41203 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:18,927 DEBUG [RS:0;c2a32e16c274:41203 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:18,927 DEBUG [RS:0;c2a32e16c274:41203 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:18,927 DEBUG [RS:0;c2a32e16c274:41203 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:18,927 DEBUG [RS:0;c2a32e16c274:41203 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:18,927 DEBUG [RS:0;c2a32e16c274:41203 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:18,927 DEBUG [RS:0;c2a32e16c274:41203 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:30:18,927 DEBUG [RS:0;c2a32e16c274:41203 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:18,927 DEBUG [RS:0;c2a32e16c274:41203 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:18,927 DEBUG [RS:0;c2a32e16c274:41203 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:18,927 DEBUG [RS:0;c2a32e16c274:41203 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:18,927 DEBUG [RS:0;c2a32e16c274:41203 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:18,927 DEBUG [RS:0;c2a32e16c274:41203 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:18,927 DEBUG [RS:0;c2a32e16c274:41203 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:30:18,928 DEBUG [RS:0;c2a32e16c274:41203 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:30:18,929 INFO [RS:0;c2a32e16c274:41203 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-20T04:30:18,929 INFO [RS:0;c2a32e16c274:41203 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:18,929 INFO [RS:0;c2a32e16c274:41203 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:18,929 INFO [RS:0;c2a32e16c274:41203 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:18,929 INFO [RS:0;c2a32e16c274:41203 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:18,929 INFO [RS:0;c2a32e16c274:41203 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,41203,1732077018566-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T04:30:18,957 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T04:30:18,957 INFO [RS:0;c2a32e16c274:41203 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,41203,1732077018566-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:18,957 INFO [RS:0;c2a32e16c274:41203 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:18,957 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.Replication(171): c2a32e16c274,41203,1732077018566 started 2024-11-20T04:30:18,980 INFO [RS:0;c2a32e16c274:41203 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:18,980 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(1482): Serving as c2a32e16c274,41203,1732077018566, RpcServer on c2a32e16c274/172.17.0.2:41203, sessionid=0x10133a3c8080001 2024-11-20T04:30:18,981 DEBUG [RS:0;c2a32e16c274:41203 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T04:30:18,981 DEBUG [RS:0;c2a32e16c274:41203 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c2a32e16c274,41203,1732077018566 2024-11-20T04:30:18,981 DEBUG [RS:0;c2a32e16c274:41203 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,41203,1732077018566' 2024-11-20T04:30:18,981 DEBUG [RS:0;c2a32e16c274:41203 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T04:30:18,981 DEBUG [RS:0;c2a32e16c274:41203 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T04:30:18,982 DEBUG [RS:0;c2a32e16c274:41203 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T04:30:18,982 DEBUG [RS:0;c2a32e16c274:41203 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T04:30:18,982 DEBUG [RS:0;c2a32e16c274:41203 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c2a32e16c274,41203,1732077018566 2024-11-20T04:30:18,982 DEBUG [RS:0;c2a32e16c274:41203 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,41203,1732077018566' 2024-11-20T04:30:18,982 DEBUG [RS:0;c2a32e16c274:41203 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T04:30:18,982 DEBUG 
[RS:0;c2a32e16c274:41203 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T04:30:18,983 DEBUG [RS:0;c2a32e16c274:41203 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T04:30:18,983 INFO [RS:0;c2a32e16c274:41203 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T04:30:18,983 INFO [RS:0;c2a32e16c274:41203 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T04:30:19,029 WARN [c2a32e16c274:39381 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-20T04:30:19,085 INFO [RS:0;c2a32e16c274:41203 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C41203%2C1732077018566, suffix=, logDir=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566, archiveDir=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/oldWALs, maxLogs=32 2024-11-20T04:30:19,086 INFO [RS:0;c2a32e16c274:41203 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C41203%2C1732077018566.1732077019086 2024-11-20T04:30:19,092 INFO [RS:0;c2a32e16c274:41203 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 2024-11-20T04:30:19,093 DEBUG [RS:0;c2a32e16c274:41203 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33145:33145),(127.0.0.1/127.0.0.1:34289:34289)] 2024-11-20T04:30:19,280 DEBUG [c2a32e16c274:39381 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T04:30:19,280 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c2a32e16c274,41203,1732077018566 2024-11-20T04:30:19,282 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2a32e16c274,41203,1732077018566, state=OPENING 2024-11-20T04:30:19,283 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T04:30:19,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:19,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:19,289 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:30:19,289 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T04:30:19,289 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:30:19,289 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c2a32e16c274,41203,1732077018566}] 2024-11-20T04:30:19,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:19,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:19,442 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T04:30:19,444 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41669, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T04:30:19,449 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T04:30:19,449 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:30:19,451 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C41203%2C1732077018566.meta, suffix=.meta, logDir=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566, archiveDir=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/oldWALs, maxLogs=32 2024-11-20T04:30:19,451 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C41203%2C1732077018566.meta.1732077019451.meta 2024-11-20T04:30:19,456 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.meta.1732077019451.meta 2024-11-20T04:30:19,460 DEBUG 
[RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33145:33145),(127.0.0.1/127.0.0.1:34289:34289)] 2024-11-20T04:30:19,462 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:30:19,462 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T04:30:19,462 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T04:30:19,462 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T04:30:19,462 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T04:30:19,462 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:30:19,462 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T04:30:19,462 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T04:30:19,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T04:30:19,465 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T04:30:19,465 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:19,465 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:19,465 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T04:30:19,466 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T04:30:19,466 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:19,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:19,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T04:30:19,467 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T04:30:19,467 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:19,468 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:19,468 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T04:30:19,468 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T04:30:19,468 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:19,469 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:19,469 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T04:30:19,470 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740 2024-11-20T04:30:19,470 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740 2024-11-20T04:30:19,472 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T04:30:19,472 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T04:30:19,472 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T04:30:19,474 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T04:30:19,474 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=760945, jitterRate=-0.032408446073532104}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T04:30:19,474 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T04:30:19,475 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732077019463Writing region info on filesystem at 1732077019463Initializing all the Stores at 1732077019463Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077019464 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077019464Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077019464Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077019464Cleaning up temporary data from old regions at 1732077019472 (+8 ms)Running coprocessor post-open hooks at 1732077019474 (+2 ms)Region opened successfully at 1732077019475 (+1 ms) 2024-11-20T04:30:19,476 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732077019442 2024-11-20T04:30:19,479 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T04:30:19,479 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T04:30:19,480 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=c2a32e16c274,41203,1732077018566 2024-11-20T04:30:19,481 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2a32e16c274,41203,1732077018566, state=OPEN 2024-11-20T04:30:19,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T04:30:19,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T04:30:19,486 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c2a32e16c274,41203,1732077018566 2024-11-20T04:30:19,486 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:30:19,486 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:30:19,489 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T04:30:19,489 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c2a32e16c274,41203,1732077018566 in 197 msec 2024-11-20T04:30:19,492 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T04:30:19,492 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 612 msec 2024-11-20T04:30:19,492 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:30:19,493 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T04:30:19,494 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T04:30:19,494 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2a32e16c274,41203,1732077018566, seqNum=-1] 2024-11-20T04:30:19,494 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T04:30:19,496 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40827, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T04:30:19,501 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 680 msec 2024-11-20T04:30:19,501 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732077019501, completionTime=-1 2024-11-20T04:30:19,501 INFO 
[master/c2a32e16c274:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T04:30:19,501 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T04:30:19,503 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T04:30:19,504 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732077079504 2024-11-20T04:30:19,504 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732077139504 2024-11-20T04:30:19,504 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-20T04:30:19,504 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,39381,1732077018505-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:19,504 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,39381,1732077018505-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:19,504 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,39381,1732077018505-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:19,505 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c2a32e16c274:39381, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:19,505 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:19,505 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:19,506 DEBUG [master/c2a32e16c274:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T04:30:19,508 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.900sec 2024-11-20T04:30:19,509 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T04:30:19,509 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T04:30:19,509 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T04:30:19,509 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-20T04:30:19,509 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-20T04:30:19,509 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,39381,1732077018505-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-20T04:30:19,509 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,39381,1732077018505-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-20T04:30:19,511 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-20T04:30:19,511 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-20T04:30:19,512 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,39381,1732077018505-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-20T04:30:19,584 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bf6b78a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-20T04:30:19,584 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c2a32e16c274,39381,-1 for getting cluster id
2024-11-20T04:30:19,584 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-20T04:30:19,586 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3e511e8f-12b3-41ee-ac40-3479f67c7b90'
2024-11-20T04:30:19,587 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-20T04:30:19,587 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3e511e8f-12b3-41ee-ac40-3479f67c7b90"
2024-11-20T04:30:19,587 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c2192f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-20T04:30:19,587 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c2a32e16c274,39381,-1]
2024-11-20T04:30:19,587 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-20T04:30:19,588 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T04:30:19,589 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50636, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-20T04:30:19,590 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@285b0f3a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-20T04:30:19,590 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-20T04:30:19,591 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2a32e16c274,41203,1732077018566, seqNum=-1]
2024-11-20T04:30:19,591 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-20T04:30:19,593 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58778, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-20T04:30:19,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c2a32e16c274,39381,1732077018505
2024-11-20T04:30:19,595 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-20T04:30:19,597 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-20T04:30:19,597 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart
2024-11-20T04:30:19,597 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2
2024-11-20T04:30:19,597 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-20T04:30:19,598 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is c2a32e16c274,39381,1732077018505
2024-11-20T04:30:19,598 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@10401f05
2024-11-20T04:30:19,599 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-20T04:30:19,600 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50650, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-20T04:30:19,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-11-20T04:30:19,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
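The two TableDescriptorChecker warnings above flag a very small MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) in the descriptor the client submitted; unusually small values like these force frequent flushes and WAL rolls, which is what a log-rolling test needs. A hedged sketch of how such a descriptor could be assembled with the public client API follows; the table name, family name, and the two sizes come from the log, while the class name and the choice to set them per-table are assumptions for illustration, not necessarily how TestLogRolling itself does it.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch, not the test's actual code: build and create a table whose
    // per-table max file size and memstore flush size match the values warned about above.
    public class CreateSmallFlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");

        TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)
                .build())
            .setMaxFileSize(786432L)        // per-table override of "hbase.hregion.max.filesize"
            .setMemStoreFlushSize(8192L)    // per-table override of "hbase.hregion.memstore.flush.size"
            .build();

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(desc);          // submits the create request handled by the master below
        }
      }
    }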
2024-11-20T04:30:19,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-20T04:30:19,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart
2024-11-20T04:30:19,603 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION
2024-11-20T04:30:19,604 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T04:30:19,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4
2024-11-20T04:30:19,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-20T04:30:19,605 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-20T04:30:19,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38659 is added to blk_1073741835_1011 (size=395)
2024-11-20T04:30:19,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39853 is added to blk_1073741835_1011 (size=395)
2024-11-20T04:30:19,615 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 54222d285bb8ea88d3153652b2ff5574, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4
2024-11-20T04:30:19,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38659 is added to blk_1073741836_1012 (size=78)
2024-11-20T04:30:19,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39853 is added to blk_1073741836_1012 (size=78)
2024-11-20T04:30:19,625 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-20T04:30:19,625 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 54222d285bb8ea88d3153652b2ff5574, disabling compactions & flushes
2024-11-20T04:30:19,625 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574.
2024-11-20T04:30:19,625 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574.
2024-11-20T04:30:19,625 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574. after waiting 0 ms
2024-11-20T04:30:19,625 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574.
2024-11-20T04:30:19,625 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574.
2024-11-20T04:30:19,625 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 54222d285bb8ea88d3153652b2ff5574: Waiting for close lock at 1732077019625Disabling compacts and flushes for region at 1732077019625Disabling writes for close at 1732077019625Writing region close event to WAL at 1732077019625Closed at 1732077019625
2024-11-20T04:30:19,626 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META
2024-11-20T04:30:19,627 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732077019627"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732077019627"}]},"ts":"1732077019627"}
2024-11-20T04:30:19,630 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
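The MetaTableAccessor entries above show the master writing an info:regioninfo and an info:state cell for the new region into hbase:meta. As an illustrative sketch only (not part of the test), a client could read those same cells back with an ordinary scan of hbase:meta; the family and qualifier names are the ones shown in the Put above, everything else (class name, the simple prefix check) is assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch: dump the hbase:meta rows for the newly created table.
    public class MetaRowDump {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        String prefix = "TestLogRolling-testLogRollOnPipelineRestart,";
        byte[] family = Bytes.toBytes("info");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          // Meta row keys look like "<table>,<startkey>,<timestamp>.<encoded region>.",
          // so starting the scan at the table-name prefix finds the rows written above.
          Scan scan = new Scan().withStartRow(Bytes.toBytes(prefix));
          try (ResultScanner scanner = meta.getScanner(scan)) {
            for (Result r : scanner) {
              String row = Bytes.toString(r.getRow());
              if (!row.startsWith(prefix)) {
                break;                                        // past this table's rows
              }
              byte[] state = r.getValue(family, Bytes.toBytes("state"));
              byte[] regionInfo = r.getValue(family, Bytes.toBytes("regioninfo"));
              System.out.println(row
                  + " state=" + (state == null ? "?" : Bytes.toString(state))
                  + " regioninfo bytes=" + (regionInfo == null ? 0 : regionInfo.length));
            }
          }
        }
      }
    }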
2024-11-20T04:30:19,632 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T04:30:19,632 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732077019632"}]},"ts":"1732077019632"} 2024-11-20T04:30:19,635 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-20T04:30:19,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=54222d285bb8ea88d3153652b2ff5574, ASSIGN}] 2024-11-20T04:30:19,637 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=54222d285bb8ea88d3153652b2ff5574, ASSIGN 2024-11-20T04:30:19,639 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=54222d285bb8ea88d3153652b2ff5574, ASSIGN; state=OFFLINE, location=c2a32e16c274,41203,1732077018566; forceNewPlan=false, retain=false 2024-11-20T04:30:19,789 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=54222d285bb8ea88d3153652b2ff5574, regionState=OPENING, regionLocation=c2a32e16c274,41203,1732077018566 2024-11-20T04:30:19,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=54222d285bb8ea88d3153652b2ff5574, ASSIGN because future has completed 2024-11-20T04:30:19,793 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 54222d285bb8ea88d3153652b2ff5574, server=c2a32e16c274,41203,1732077018566}] 2024-11-20T04:30:19,950 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574. 
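The assignment procedures above (pid=5 and its child pid=6) move the new region from OFFLINE to OPEN on c2a32e16c274,41203,1732077018566. A minimal sketch, assuming only the public RegionLocator API, of how a client could observe where each region of the table ended up once assignment completes; the class and variable names are illustrative.

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Illustrative sketch: print the encoded region name and hosting server for every
    // region of the test table, mirroring the regionLocation values logged by the master.
    public class ShowRegionLocations {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(table)) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }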
2024-11-20T04:30:19,950 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 54222d285bb8ea88d3153652b2ff5574, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574.', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:30:19,951 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 54222d285bb8ea88d3153652b2ff5574 2024-11-20T04:30:19,951 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:30:19,951 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 54222d285bb8ea88d3153652b2ff5574 2024-11-20T04:30:19,951 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 54222d285bb8ea88d3153652b2ff5574 2024-11-20T04:30:19,952 INFO [StoreOpener-54222d285bb8ea88d3153652b2ff5574-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 54222d285bb8ea88d3153652b2ff5574 2024-11-20T04:30:19,954 INFO [StoreOpener-54222d285bb8ea88d3153652b2ff5574-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 54222d285bb8ea88d3153652b2ff5574 columnFamilyName info 2024-11-20T04:30:19,954 DEBUG [StoreOpener-54222d285bb8ea88d3153652b2ff5574-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:19,954 INFO [StoreOpener-54222d285bb8ea88d3153652b2ff5574-1 {}] regionserver.HStore(327): Store=54222d285bb8ea88d3153652b2ff5574/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:30:19,954 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 54222d285bb8ea88d3153652b2ff5574 2024-11-20T04:30:19,955 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/default/TestLogRolling-testLogRollOnPipelineRestart/54222d285bb8ea88d3153652b2ff5574 2024-11-20T04:30:19,956 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/default/TestLogRolling-testLogRollOnPipelineRestart/54222d285bb8ea88d3153652b2ff5574 2024-11-20T04:30:19,956 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 54222d285bb8ea88d3153652b2ff5574 2024-11-20T04:30:19,956 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 54222d285bb8ea88d3153652b2ff5574 2024-11-20T04:30:19,958 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 54222d285bb8ea88d3153652b2ff5574 2024-11-20T04:30:19,960 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/default/TestLogRolling-testLogRollOnPipelineRestart/54222d285bb8ea88d3153652b2ff5574/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:30:19,960 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 54222d285bb8ea88d3153652b2ff5574; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696790, jitterRate=-0.11398649215698242}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T04:30:19,960 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 54222d285bb8ea88d3153652b2ff5574 2024-11-20T04:30:19,961 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 54222d285bb8ea88d3153652b2ff5574: Running coprocessor pre-open hook at 1732077019951Writing region info on filesystem at 1732077019951Initializing all the Stores at 1732077019952 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077019952Cleaning up temporary data from old regions at 1732077019956 (+4 ms)Running coprocessor post-open hooks at 1732077019960 (+4 ms)Region opened successfully at 1732077019961 (+1 ms) 2024-11-20T04:30:19,962 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574., pid=6, masterSystemTime=1732077019946 2024-11-20T04:30:19,965 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574. 2024-11-20T04:30:19,965 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574. 2024-11-20T04:30:19,966 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=54222d285bb8ea88d3153652b2ff5574, regionState=OPEN, openSeqNum=2, regionLocation=c2a32e16c274,41203,1732077018566 2024-11-20T04:30:19,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 54222d285bb8ea88d3153652b2ff5574, server=c2a32e16c274,41203,1732077018566 because future has completed 2024-11-20T04:30:19,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T04:30:19,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 54222d285bb8ea88d3153652b2ff5574, server=c2a32e16c274,41203,1732077018566 in 176 msec 2024-11-20T04:30:19,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T04:30:19,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=54222d285bb8ea88d3153652b2ff5574, ASSIGN in 337 msec 2024-11-20T04:30:19,976 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T04:30:19,976 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732077019976"}]},"ts":"1732077019976"} 2024-11-20T04:30:19,978 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-20T04:30:19,979 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T04:30:19,981 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 378 msec 2024-11-20T04:30:20,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:20,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:21,223 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T04:30:21,223 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T04:30:21,224 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-20T04:30:21,224 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-20T04:30:21,225 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T04:30:21,225 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T04:30:21,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:21,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:22,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:22,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:23,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:23,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:24,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:24,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:24,964 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T04:30:24,982 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:24,983 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:24,983 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:24,983 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:24,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:24,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:24,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:24,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:24,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:24,990 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:24,995 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T04:30:24,995 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-20T04:30:25,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:25,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:26,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:26,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:27,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:27,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:28,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:28,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:29,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:29,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:29,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T04:30:29,702 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-20T04:30:29,702 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-20T04:30:29,705 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-20T04:30:29,705 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574. 2024-11-20T04:30:29,709 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574., hostname=c2a32e16c274,41203,1732077018566, seqNum=2] 2024-11-20T04:30:30,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:30,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:31,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:31,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:31,712 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 2024-11-20T04:30:31,712 WARN [ResponseProcessor for block BP-524018934-172.17.0.2-1732077017674:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-524018934-172.17.0.2-1732077017674:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:31,712 WARN [ResponseProcessor for block BP-524018934-172.17.0.2-1732077017674:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-524018934-172.17.0.2-1732077017674:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-524018934-172.17.0.2-1732077017674:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:39853,DS-664ef650-2b74-4030-b463-59a5d0dce02b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:31,713 WARN [DataStreamer for file /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/WALs/c2a32e16c274,39381,1732077018505/c2a32e16c274%2C39381%2C1732077018505.1732077018700 block BP-524018934-172.17.0.2-1732077017674:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-524018934-172.17.0.2-1732077017674:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK], DatanodeInfoWithStorage[127.0.0.1:39853,DS-664ef650-2b74-4030-b463-59a5d0dce02b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39853,DS-664ef650-2b74-4030-b463-59a5d0dce02b,DISK]) is bad. 2024-11-20T04:30:31,713 WARN [DataStreamer for file /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.meta.1732077019451.meta block BP-524018934-172.17.0.2-1732077017674:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-524018934-172.17.0.2-1732077017674:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39853,DS-664ef650-2b74-4030-b463-59a5d0dce02b,DISK], DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39853,DS-664ef650-2b74-4030-b463-59a5d0dce02b,DISK]) is bad. 
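The once-per-second RecoverLeaseFSUtils warnings above all follow the same shape: an optional isFileClosed() method is invoked reflectively, the invocation gets wrapped in InvocationTargetException, and the interesting failure ("Filesystem closed", thrown by DFSClient.checkOpen) only shows up as the cause. The sketch below is a minimal, self-contained illustration of that probe-and-unwrap pattern; FakeDfs and its String-based method signature are hypothetical stand-ins, not the Hadoop or HBase API.

    // Minimal sketch of a reflective isFileClosed() probe (not the HBase implementation).
    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class IsFileClosedProbe {

      // Hypothetical stand-in for a filesystem client that has already been shut down.
      public static class FakeDfs {
        public boolean isFileClosed(String path) throws IOException {
          throw new IOException("Filesystem closed");   // analogous to DFSClient.checkOpen()
        }
      }

      // Returns true only when the file is positively reported closed; any failure is
      // logged and treated as "not yet", which is why the caller simply retries later.
      static boolean probe(Object fs, String path) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", String.class);
          return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false;                                  // probe unavailable on this implementation
        } catch (InvocationTargetException e) {
          // The wrapped cause is what matters, e.g. java.io.IOException: Filesystem closed.
          System.err.println("Failed invocation for " + path + ": " + e.getCause());
          return false;
        }
      }

      public static void main(String[] args) {
        System.out.println(probe(new FakeDfs(), "/wal/example"));   // logs the cause, prints false
      }
    }

Because the probe can never succeed once the underlying client is closed, the Close-WAL-Writer thread keeps retrying and the identical warning repeats every second, which is exactly the pattern visible in this stretch of the log.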
2024-11-20T04:30:31,713 WARN [ResponseProcessor for block BP-524018934-172.17.0.2-1732077017674:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-524018934-172.17.0.2-1732077017674:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:31,713 WARN [DataStreamer for file /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 block BP-524018934-172.17.0.2-1732077017674:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-524018934-172.17.0.2-1732077017674:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39853,DS-664ef650-2b74-4030-b463-59a5d0dce02b,DISK], DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39853,DS-664ef650-2b74-4030-b463-59a5d0dce02b,DISK]) is bad. 2024-11-20T04:30:31,713 WARN [PacketResponder: BP-524018934-172.17.0.2-1732077017674:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39853] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
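The DataStreamer "Error Recovery ... is bad" entries record the write pipeline being rebuilt around a failed datanode: the ack identifies the bad node and the client continues with the surviving replicas. The following is a deliberately simplified, hypothetical sketch of only that bookkeeping step; the real recovery also bumps the block's generation stamp, which is why blk_1073741830_1006 reappears later in this log as blk_1073741830_1013.

    import java.util.ArrayList;
    import java.util.List;

    public class PipelineRecoverySketch {

      // Remove the datanode reported bad in the ack and return the surviving pipeline.
      static List<String> dropBadNode(List<String> pipeline, int badIndex) {
        List<String> rebuilt = new ArrayList<>(pipeline);
        String removed = rebuilt.remove(badIndex);
        System.err.println("Error Recovery: datanode " + badIndex + " (" + removed + ") is bad.");
        return rebuilt;
      }

      public static void main(String[] args) {
        List<String> pipeline = List.of("127.0.0.1:38659", "127.0.0.1:39853");
        System.out.println(dropBadNode(pipeline, 1));   // [127.0.0.1:38659]
      }
    }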
2024-11-20T04:30:31,713 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_691395467_22 at /127.0.0.1:36976 [Receiving block BP-524018934-172.17.0.2-1732077017674:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36976 dst: /127.0.0.1:38659 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:30:31,713 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_221313750_22 at /127.0.0.1:36928 [Receiving block BP-524018934-172.17.0.2-1732077017674:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36928 dst: /127.0.0.1:38659 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
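The "Premature EOF from inputStream" errors come from a read-fully style helper: the receiver needs a fixed number of bytes for the next packet, and a short read in the middle of that packet means the writer went away. A minimal JDK-only equivalent of that helper, written here for illustration rather than copied from Hadoop's IOUtils:

    import java.io.ByteArrayInputStream;
    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;

    public class ReadFullySketch {

      // Read exactly len bytes into buf or fail; a stream that ends early is an error,
      // not a normal EOF, because the packet framing promised more data.
      static void readFully(InputStream in, byte[] buf, int off, int len) throws IOException {
        int total = 0;
        while (total < len) {
          int n = in.read(buf, off + total, len - total);
          if (n < 0) {
            throw new EOFException("Premature EOF: expected " + len + " bytes, got " + total);
          }
          total += n;
        }
      }

      public static void main(String[] args) {
        byte[] buf = new byte[8];
        try {
          // Only 3 bytes are available, so asking for 8 fails the same way the datanode does.
          readFully(new ByteArrayInputStream(new byte[3]), buf, 0, 8);
        } catch (IOException e) {
          System.out.println(e.getMessage());   // Premature EOF: expected 8 bytes, got 3
        }
      }
    }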
2024-11-20T04:30:31,713 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_691395467_22 at /127.0.0.1:60440 [Receiving block BP-524018934-172.17.0.2-1732077017674:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39853:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60440 dst: /127.0.0.1:39853 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:30:31,713 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_691395467_22 at /127.0.0.1:60436 [Receiving block BP-524018934-172.17.0.2-1732077017674:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39853:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60436 dst: /127.0.0.1:39853 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:30:31,714 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_691395467_22 at /127.0.0.1:36966 [Receiving block BP-524018934-172.17.0.2-1732077017674:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36966 dst: /127.0.0.1:38659 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:30:31,714 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_221313750_22 at /127.0.0.1:60422 [Receiving block BP-524018934-172.17.0.2-1732077017674:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39853:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60422 dst: /127.0.0.1:39853 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
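The ClosedChannelException stacks are the other face of the same shutdown: the datanode's socket has already been torn down, so when the receive path tries to register the channel with a selector for the next read, java.nio refuses. The behaviour is easy to reproduce with nothing but the JDK:

    import java.nio.channels.ClosedChannelException;
    import java.nio.channels.SelectionKey;
    import java.nio.channels.Selector;
    import java.nio.channels.SocketChannel;

    public class ClosedChannelDemo {
      public static void main(String[] args) throws Exception {
        Selector selector = Selector.open();
        SocketChannel ch = SocketChannel.open();
        ch.configureBlocking(false);
        ch.close();                                   // channel already shut down, as during a datanode stop
        try {
          ch.register(selector, SelectionKey.OP_READ);
        } catch (ClosedChannelException e) {
          System.out.println("register() on a closed channel: " + e);
        } finally {
          selector.close();
        }
      }
    }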
2024-11-20T04:30:31,715 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78745d04{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:30:31,716 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ea5a7a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:30:31,716 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:30:31,716 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51031d29{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:30:31,716 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@393a832c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir/,STOPPED} 2024-11-20T04:30:31,718 WARN [BP-524018934-172.17.0.2-1732077017674 heartbeating to localhost/127.0.0.1:39703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:30:31,718 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T04:30:31,718 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:30:31,718 WARN [BP-524018934-172.17.0.2-1732077017674 heartbeating to localhost/127.0.0.1:39703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-524018934-172.17.0.2-1732077017674 (Datanode Uuid e29a8ceb-33a1-47a2-aa99-54fe9f459a9f) service to localhost/127.0.0.1:39703 2024-11-20T04:30:31,719 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data3/current/BP-524018934-172.17.0.2-1732077017674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:30:31,719 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data4/current/BP-524018934-172.17.0.2-1732077017674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:30:31,719 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:30:31,726 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:30:31,730 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:30:31,730 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:30:31,730 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:30:31,730 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:30:31,731 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b68c165{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:30:31,731 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b408bc7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:30:31,847 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5282eca5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/java.io.tmpdir/jetty-localhost-35811-hadoop-hdfs-3_4_1-tests_jar-_-any-4634082225967281093/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:30:31,848 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@156f820b{HTTP/1.1, (http/1.1)}{localhost:35811} 2024-11-20T04:30:31,848 INFO [Time-limited test {}] server.Server(415): Started @165926ms 2024-11-20T04:30:31,849 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T04:30:31,873 WARN [ResponseProcessor for block BP-524018934-172.17.0.2-1732077017674:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-524018934-172.17.0.2-1732077017674:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:31,873 WARN [ResponseProcessor for block BP-524018934-172.17.0.2-1732077017674:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-524018934-172.17.0.2-1732077017674:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:31,873 WARN [ResponseProcessor for block BP-524018934-172.17.0.2-1732077017674:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-524018934-172.17.0.2-1732077017674:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:31,873 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_221313750_22 at /127.0.0.1:43026 [Receiving block BP-524018934-172.17.0.2-1732077017674:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43026 dst: /127.0.0.1:38659 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:30:31,873 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_691395467_22 at /127.0.0.1:43040 [Receiving block BP-524018934-172.17.0.2-1732077017674:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43040 dst: /127.0.0.1:38659 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:30:31,873 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_691395467_22 at /127.0.0.1:43038 [Receiving block BP-524018934-172.17.0.2-1732077017674:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43038 dst: /127.0.0.1:38659 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:30:31,875 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7091f2a1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:30:31,875 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4360f0f4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:30:31,875 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:30:31,876 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e63449e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:30:31,876 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@232fa1ae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir/,STOPPED} 2024-11-20T04:30:31,877 WARN [BP-524018934-172.17.0.2-1732077017674 heartbeating to localhost/127.0.0.1:39703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:30:31,877 WARN [BP-524018934-172.17.0.2-1732077017674 heartbeating to localhost/127.0.0.1:39703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-524018934-172.17.0.2-1732077017674 (Datanode Uuid c4a40280-f54d-4d23-9f7a-e14c3e8341a9) service to localhost/127.0.0.1:39703 2024-11-20T04:30:31,877 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T04:30:31,877 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:30:31,879 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data1/current/BP-524018934-172.17.0.2-1732077017674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:30:31,879 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data2/current/BP-524018934-172.17.0.2-1732077017674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:30:31,879 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:30:31,889 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:30:31,892 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:30:31,892 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:30:31,892 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:30:31,892 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:30:31,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22d0d1b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:30:31,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ce7ccc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:30:31,944 WARN [Thread-1335 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T04:30:31,947 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb9498ae19f9838d2 with lease ID 0x9e188147bdb89f0b: from storage DS-664ef650-2b74-4030-b463-59a5d0dce02b node DatanodeRegistration(127.0.0.1:34665, datanodeUuid=e29a8ceb-33a1-47a2-aa99-54fe9f459a9f, infoPort=33385, infoSecurePort=0, ipcPort=38311, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:31,947 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb9498ae19f9838d2 with lease ID 0x9e188147bdb89f0b: from storage DS-a5d39f6e-9ff1-4c1a-8396-558d53f578a9 node DatanodeRegistration(127.0.0.1:34665, datanodeUuid=e29a8ceb-33a1-47a2-aa99-54fe9f459a9f, infoPort=33385, infoSecurePort=0, ipcPort=38311, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:32,014 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d8da7be{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/java.io.tmpdir/jetty-localhost-35835-hadoop-hdfs-3_4_1-tests_jar-_-any-5048520752322964851/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:30:32,014 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ca8564b{HTTP/1.1, (http/1.1)}{localhost:35835} 2024-11-20T04:30:32,014 INFO [Time-limited test {}] server.Server(415): Started @166093ms 2024-11-20T04:30:32,016 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T04:30:32,106 WARN [Thread-1366 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T04:30:32,109 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x21c8f3a9bb84ef78 with lease ID 0x9e188147bdb89f0c: from storage DS-78289be3-b11f-4030-9499-2012da7b3c24 node DatanodeRegistration(127.0.0.1:43315, datanodeUuid=c4a40280-f54d-4d23-9f7a-e14c3e8341a9, infoPort=35749, infoSecurePort=0, ipcPort=37387, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:32,109 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x21c8f3a9bb84ef78 with lease ID 0x9e188147bdb89f0c: from storage DS-a37f9a57-2e28-4ab8-824e-73edccf42b1e node DatanodeRegistration(127.0.0.1:43315, datanodeUuid=c4a40280-f54d-4d23-9f7a-e14c3e8341a9, infoPort=35749, infoSecurePort=0, ipcPort=37387, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:32,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:30:32,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:33,033 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-20T04:30:33,036 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-20T04:30:33,037 ERROR [FSHLog-0-hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4-prefix:c2a32e16c274,41203,1732077018566 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:30:33,037 WARN [FSHLog-0-hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4-prefix:c2a32e16c274,41203,1732077018566 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:33,037 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C41203%2C1732077018566:(num 1732077019086) roll requested 2024-11-20T04:30:33,037 INFO [regionserver/c2a32e16c274:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C41203%2C1732077018566.1732077033037 2024-11-20T04:30:33,043 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 newFile=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 2024-11-20T04:30:33,043 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:33,043 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:33,043 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:33,043 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:33,043 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:33,044 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 2024-11-20T04:30:33,044 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
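The records above at 04:30:33 show a roll-on-error sequence: appendAndSync fails with "All datanodes ... are bad", the logRoller requests a roll, the sync runners are interrupted, and a new WAL file is created while the trailer write on the old writer fails non-fatally. A minimal sketch of that control flow follows; it is illustrative only, and WalWriter, WalWriterFactory, and this appendAndSync are invented names, not the HBase AbstractFSWAL/FSHLog API.

```java
import java.io.IOException;

/** Roll-on-error control flow only; WalWriter/WalWriterFactory are invented names, not HBase APIs. */
final class RollOnErrorWal {

  interface WalWriter {
    void append(byte[] entry) throws IOException;
    void sync() throws IOException;
    void close() throws IOException;
  }

  interface WalWriterFactory {
    WalWriter create() throws IOException;
  }

  private final WalWriterFactory factory;
  private WalWriter writer;

  RollOnErrorWal(WalWriterFactory factory) throws IOException {
    this.factory = factory;
    this.writer = factory.create();
  }

  /** Append and sync; if the write pipeline is bad, best-effort close the old writer and roll to a new one. */
  void appendAndSync(byte[] entry) throws IOException {
    try {
      writer.append(entry);
      writer.sync();
    } catch (IOException appendFailure) {
      try {
        writer.close(); // may itself fail, as in "close old writer failed" -- treated as non-fatal
      } catch (IOException ignored) {
        // the old file is left for later recovery/archiving
      }
      writer = factory.create(); // the roll: a new file on a fresh pipeline
      throw appendFailure;       // caller decides whether to retry the entry on the new writer
    }
  }
}
```

The sketch omits the lease-recovery and archiving bookkeeping that the surrounding records show for the old WAL file.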
2024-11-20T04:30:33,044 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:33,044 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 2024-11-20T04:30:33,045 WARN [IPC Server handler 1 on default port 39703 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-20T04:30:33,045 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 after 1ms 2024-11-20T04:30:33,045 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35749:35749),(127.0.0.1/127.0.0.1:33385:33385)] 2024-11-20T04:30:33,045 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 is not closed yet, will try archiving it next time 2024-11-20T04:30:33,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:33,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:30:34,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:34,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:35,049 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-20T04:30:35,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:30:35,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:35,947 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-20T04:30:36,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:36,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:37,046 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 after 4002ms 2024-11-20T04:30:37,051 WARN [ResponseProcessor for block BP-524018934-172.17.0.2-1732077017674:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-524018934-172.17.0.2-1732077017674:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:37,052 WARN [DataStreamer for file /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 block BP-524018934-172.17.0.2-1732077017674:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-524018934-172.17.0.2-1732077017674:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43315,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK], DatanodeInfoWithStorage[127.0.0.1:34665,DS-664ef650-2b74-4030-b463-59a5d0dce02b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43315,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]) is bad. 
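The lease-recovery records above trace the close-side cleanup of the old WAL file on localhost:39703: attempt=0 at 04:30:33 is reported as failed because NameNode-driven recovery is still in progress (RecoveryId = 1017), and attempt=1 succeeds at 04:30:37 after 4002ms. A minimal recover-then-poll loop of that shape is sketched below; the class name, the fixed 4-second pause, and the println reporting are assumptions for illustration, not the RecoverLeaseFSUtils implementation, which is visible here only through its log lines and stack frames.

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/**
 * Illustrative recover-then-poll loop; not the RecoverLeaseFSUtils implementation.
 * The pause and the attempt/elapsed reporting are assumptions chosen to mirror the log output.
 */
public final class LeaseRecoverySketch {

  public static void recoverLease(DistributedFileSystem dfs, Path wal)
      throws IOException, InterruptedException {
    long start = System.currentTimeMillis();
    for (int attempt = 0; ; attempt++) {
      // recoverLease() returns true once the NameNode has closed the file; false while
      // block recovery (e.g. "RecoveryId = 1017" in the log) is still in progress.
      boolean closed = dfs.recoverLease(wal);
      if (!closed) {
        // The stack traces above show the utility also checking isFileClosed() reflectively;
        // that call is what raises "Filesystem closed" once a test's DFSClient has been shut down.
        closed = dfs.isFileClosed(wal);
      }
      long elapsedMs = System.currentTimeMillis() - start;
      if (closed) {
        System.out.println("Recovered lease, attempt=" + attempt + " on file=" + wal
            + " after " + elapsedMs + "ms");
        return;
      }
      System.out.println("Failed to recover lease, attempt=" + attempt + " on file=" + wal
          + " after " + elapsedMs + "ms");
      Thread.sleep(4000L); // assumed fixed pause; the real utility's wait intervals are configurable
    }
  }
}
```

DistributedFileSystem#recoverLease and #isFileClosed are the two HDFS client calls that do appear in the traces; everything else in the sketch is scaffolding.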
2024-11-20T04:30:37,052 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_691395467_22 at /127.0.0.1:43414 [Receiving block BP-524018934-172.17.0.2-1732077017674:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43414 dst: /127.0.0.1:43315 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:30:37,052 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_691395467_22 at /127.0.0.1:49562 [Receiving block BP-524018934-172.17.0.2-1732077017674:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:34665:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49562 dst: /127.0.0.1:34665 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:30:37,054 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d8da7be{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:30:37,054 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ca8564b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:30:37,054 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:30:37,054 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ce7ccc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:30:37,055 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22d0d1b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir/,STOPPED} 2024-11-20T04:30:37,055 WARN [BP-524018934-172.17.0.2-1732077017674 heartbeating to localhost/127.0.0.1:39703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:30:37,055 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T04:30:37,056 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:30:37,056 WARN [BP-524018934-172.17.0.2-1732077017674 heartbeating to localhost/127.0.0.1:39703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-524018934-172.17.0.2-1732077017674 (Datanode Uuid c4a40280-f54d-4d23-9f7a-e14c3e8341a9) service to localhost/127.0.0.1:39703 2024-11-20T04:30:37,056 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data1/current/BP-524018934-172.17.0.2-1732077017674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:30:37,056 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data2/current/BP-524018934-172.17.0.2-1732077017674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:30:37,057 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:30:37,067 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:30:37,070 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:30:37,071 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:30:37,071 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:30:37,071 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T04:30:37,071 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2392cae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:30:37,072 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5aaed393{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:30:37,186 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6a90fb45{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/java.io.tmpdir/jetty-localhost-46635-hadoop-hdfs-3_4_1-tests_jar-_-any-5922296459194755981/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:30:37,187 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1dad3af2{HTTP/1.1, 
(http/1.1)}{localhost:46635} 2024-11-20T04:30:37,187 INFO [Time-limited test {}] server.Server(415): Started @171265ms 2024-11-20T04:30:37,188 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T04:30:37,205 WARN [ResponseProcessor for block BP-524018934-172.17.0.2-1732077017674:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-524018934-172.17.0.2-1732077017674:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:37,206 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_691395467_22 at /127.0.0.1:49582 [Receiving block BP-524018934-172.17.0.2-1732077017674:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:34665:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49582 dst: /127.0.0.1:34665 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:30:37,211 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5282eca5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:30:37,212 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@156f820b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:30:37,212 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:30:37,212 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b408bc7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:30:37,212 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b68c165{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir/,STOPPED} 2024-11-20T04:30:37,213 WARN [BP-524018934-172.17.0.2-1732077017674 heartbeating to localhost/127.0.0.1:39703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:30:37,213 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T04:30:37,213 WARN [BP-524018934-172.17.0.2-1732077017674 heartbeating to localhost/127.0.0.1:39703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-524018934-172.17.0.2-1732077017674 (Datanode Uuid e29a8ceb-33a1-47a2-aa99-54fe9f459a9f) service to localhost/127.0.0.1:39703 2024-11-20T04:30:37,213 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:30:37,215 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data3/current/BP-524018934-172.17.0.2-1732077017674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:30:37,215 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data4/current/BP-524018934-172.17.0.2-1732077017674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:30:37,215 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:30:37,225 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:30:37,229 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:30:37,231 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:30:37,231 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:30:37,231 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T04:30:37,231 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a9d36bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:30:37,232 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@787041af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:30:37,309 WARN [Thread-1409 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T04:30:37,311 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x54017daf9c1f0b6f with lease ID 0x9e188147bdb89f0d: from storage DS-78289be3-b11f-4030-9499-2012da7b3c24 node DatanodeRegistration(127.0.0.1:43021, datanodeUuid=c4a40280-f54d-4d23-9f7a-e14c3e8341a9, infoPort=34625, infoSecurePort=0, ipcPort=46487, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:37,311 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x54017daf9c1f0b6f with lease ID 0x9e188147bdb89f0d: from storage DS-a37f9a57-2e28-4ab8-824e-73edccf42b1e node DatanodeRegistration(127.0.0.1:43021, datanodeUuid=c4a40280-f54d-4d23-9f7a-e14c3e8341a9, infoPort=34625, infoSecurePort=0, ipcPort=46487, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:37,357 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@20aef44d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/java.io.tmpdir/jetty-localhost-40307-hadoop-hdfs-3_4_1-tests_jar-_-any-12856009014407511619/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:30:37,358 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f2f2023{HTTP/1.1, (http/1.1)}{localhost:40307} 2024-11-20T04:30:37,358 INFO [Time-limited test {}] server.Server(415): Started @171436ms 2024-11-20T04:30:37,359 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-20T04:30:37,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:37,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:37,450 WARN [Thread-1440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T04:30:37,453 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc52ec64c6cd983ae with lease ID 0x9e188147bdb89f0e: from storage DS-664ef650-2b74-4030-b463-59a5d0dce02b node DatanodeRegistration(127.0.0.1:37765, datanodeUuid=e29a8ceb-33a1-47a2-aa99-54fe9f459a9f, infoPort=37919, infoSecurePort=0, ipcPort=39673, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T04:30:37,453 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc52ec64c6cd983ae with lease ID 0x9e188147bdb89f0e: from storage DS-a5d39f6e-9ff1-4c1a-8396-558d53f578a9 node DatanodeRegistration(127.0.0.1:37765, datanodeUuid=e29a8ceb-33a1-47a2-aa99-54fe9f459a9f, infoPort=37919, infoSecurePort=0, ipcPort=39673, storageInfo=lv=-57;cid=testClusterID;nsid=423194240;c=1732077017674), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:38,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:38,385 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-20T04:30:38,387 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-20T04:30:38,388 ERROR [FSHLog-0-hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4-prefix:c2a32e16c274,41203,1732077018566 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34665,DS-664ef650-2b74-4030-b463-59a5d0dce02b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:38,389 WARN [FSHLog-0-hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4-prefix:c2a32e16c274,41203,1732077018566 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34665,DS-664ef650-2b74-4030-b463-59a5d0dce02b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:30:38,389 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C41203%2C1732077018566:(num 1732077033037) roll requested 2024-11-20T04:30:38,389 INFO [regionserver/c2a32e16c274:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C41203%2C1732077018566.1732077038389 2024-11-20T04:30:38,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:30:38,394 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 newFile=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077038389 2024-11-20T04:30:38,394 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:38,394 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:38,395 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:38,395 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:38,395 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:38,395 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077038389 2024-11-20T04:30:38,395 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34665,DS-664ef650-2b74-4030-b463-59a5d0dce02b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:38,395 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34665,DS-664ef650-2b74-4030-b463-59a5d0dce02b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T04:30:38,395 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 2024-11-20T04:30:38,396 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37919:37919),(127.0.0.1/127.0.0.1:34625:34625)] 2024-11-20T04:30:38,396 WARN [IPC Server handler 1 on default port 39703 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-20T04:30:38,396 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 is not closed yet, will try archiving it next time 2024-11-20T04:30:38,396 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 after 1ms 2024-11-20T04:30:39,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:39,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:40,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:40,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:40,397 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C41203%2C1732077018566.1732077040397 2024-11-20T04:30:40,402 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077038389 newFile=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 2024-11-20T04:30:40,402 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:40,402 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:40,402 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:40,403 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:40,403 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:40,403 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077038389 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 2024-11-20T04:30:40,404 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34625:34625),(127.0.0.1/127.0.0.1:37919:37919)] 2024-11-20T04:30:40,404 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 is not closed yet, will try archiving it next time 2024-11-20T04:30:40,404 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077038389 is not closed yet, will try archiving it next time 2024-11-20T04:30:40,404 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 2024-11-20T04:30:40,404 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 2024-11-20T04:30:40,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43021 is added to blk_1073741838_1019 (size=1264) 2024-11-20T04:30:40,405 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37765 is added to blk_1073741838_1019 (size=1264) 2024-11-20T04:30:40,405 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 after 1ms 2024-11-20T04:30:40,405 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 2024-11-20T04:30:40,406 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 is not closed yet, will try archiving it next time 2024-11-20T04:30:40,414 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732077019961/Put/vlen=218/seqid=0] 2024-11-20T04:30:40,414 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732077029710/Put/vlen=1045/seqid=0] 2024-11-20T04:30:40,414 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077019086 2024-11-20T04:30:40,414 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 2024-11-20T04:30:40,414 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 2024-11-20T04:30:40,415 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 after 1ms 2024-11-20T04:30:40,415 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 2024-11-20T04:30:40,417 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732077033037/Put/vlen=1045/seqid=0] 2024-11-20T04:30:40,417 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732077035050/Put/vlen=1045/seqid=0] 2024-11-20T04:30:40,417 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 2024-11-20T04:30:40,417 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077038389 2024-11-20T04:30:40,417 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077038389 2024-11-20T04:30:40,418 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077038389 after 1ms 2024-11-20T04:30:40,418 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077038389 2024-11-20T04:30:40,421 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732077038388/Put/vlen=1045/seqid=0] 2024-11-20T04:30:40,421 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 2024-11-20T04:30:40,421 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 2024-11-20T04:30:40,421 WARN [IPC Server handler 0 on default port 39703 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-20T04:30:40,421 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 after 0ms 2024-11-20T04:30:41,313 WARN [ResponseProcessor for block BP-524018934-172.17.0.2-1732077017674:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-524018934-172.17.0.2-1732077017674:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:41,313 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_221313750_22 at /127.0.0.1:58244 [Receiving block BP-524018934-172.17.0.2-1732077017674:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37765:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58244 dst: /127.0.0.1:37765 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:30:41,313 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_221313750_22 at /127.0.0.1:59250 [Receiving block BP-524018934-172.17.0.2-1732077017674:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:43021:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59250 dst: /127.0.0.1:43021 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:43021 remote=/127.0.0.1:59250]. Total timeout mills is 60000, 59089 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:30:41,313 WARN [DataStreamer for file /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 block BP-524018934-172.17.0.2-1732077017674:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-524018934-172.17.0.2-1732077017674:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43021,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK], DatanodeInfoWithStorage[127.0.0.1:37765,DS-664ef650-2b74-4030-b463-59a5d0dce02b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43021,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]) is bad. 2024-11-20T04:30:41,314 WARN [DataStreamer for file /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 block BP-524018934-172.17.0.2-1732077017674:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-524018934-172.17.0.2-1732077017674:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:41,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43021 is added to blk_1073741839_1022 (size=85) 2024-11-20T04:30:41,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37765 is added to blk_1073741839_1022 (size=85) 2024-11-20T04:30:41,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:30:41,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:42,312 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-20T04:30:42,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:42,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:42,397 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077033037 after 4002ms 2024-11-20T04:30:43,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:43,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:44,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:44,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:44,422 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 after 4001ms 2024-11-20T04:30:44,422 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 2024-11-20T04:30:44,426 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 2024-11-20T04:30:44,426 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 54222d285bb8ea88d3153652b2ff5574 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-20T04:30:44,426 ERROR [FSHLog-0-hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4-prefix:c2a32e16c274,41203,1732077018566 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-524018934-172.17.0.2-1732077017674:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:44,427 WARN [FSHLog-0-hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4-prefix:c2a32e16c274,41203,1732077018566 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-524018934-172.17.0.2-1732077017674:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:44,427 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C41203%2C1732077018566:(num 1732077040397) roll requested 2024-11-20T04:30:44,427 INFO [regionserver/c2a32e16c274:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C41203%2C1732077018566.1732077044427 2024-11-20T04:30:44,432 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 newFile=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077044427 2024-11-20T04:30:44,433 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,433 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,433 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,433 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,433 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,433 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077044427 2024-11-20T04:30:44,433 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-524018934-172.17.0.2-1732077017674:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:44,434 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-524018934-172.17.0.2-1732077017674:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor186.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
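The retry behaviour visible above — the Close-WAL-Writer thread re-attempting lease recovery roughly once per second until it finally logs "Recovered lease, attempt=1 ... after 4002ms" — amounts to a bounded polling loop around an isFileClosed-style check. The sketch below is only an illustration of that loop in plain Java; the predicate, timeout values, and log strings are hypothetical stand-ins, not the actual RecoverLeaseFSUtils implementation.

```java
// Illustrative sketch only: a bounded polling loop in the spirit of the lease-recovery
// retries seen in the log above. The supplier and timing constants are hypothetical.
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public final class LeaseRecoveryRetrySketch {
    /** Polls {@code fileClosed} until it returns true or the deadline passes. */
    static boolean waitUntilClosed(BooleanSupplier fileClosed,
                                   long timeoutMs, long pauseMs) throws InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
        int attempt = 0;
        while (System.nanoTime() < deadline) {
            try {
                if (fileClosed.getAsBoolean()) {
                    System.out.println("recovered, attempt=" + attempt);
                    return true;
                }
            } catch (RuntimeException e) {
                // Mirrors the repeated WARN "Failed invocation ..." lines: log and keep retrying.
                System.out.println("attempt=" + attempt + " failed: " + e.getMessage());
            }
            attempt++;
            Thread.sleep(pauseMs); // the log shows roughly one retry per second per WAL file
        }
        return false;
    }

    public static void main(String[] args) throws InterruptedException {
        // Toy stand-in for an isFileClosed-style check: reports "closed" after ~3 seconds.
        long start = System.currentTimeMillis();
        boolean ok = waitUntilClosed(() -> System.currentTimeMillis() - start > 3000, 10_000, 1000);
        System.out.println("recovered=" + ok);
    }
}
```

The one-second pause matches the spacing of the repeated failed-invocation warnings above (04:30:41, 04:30:42, 04:30:43, 04:30:44), which is why the same stack trace recurs once per second for each WAL file until the lease is recovered or the attempt gives up.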
2024-11-20T04:30:44,434 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 2024-11-20T04:30:44,434 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 after 0ms 2024-11-20T04:30:44,435 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37919:37919),(127.0.0.1/127.0.0.1:34625:34625)] 2024-11-20T04:30:44,435 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.1732077040397 to hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/oldWALs/c2a32e16c274%2C41203%2C1732077018566.1732077040397 2024-11-20T04:30:44,453 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/default/TestLogRolling-testLogRollOnPipelineRestart/54222d285bb8ea88d3153652b2ff5574/.tmp/info/8d098f402b684f8cb86c73980f950bc5 is 1080, key is row1002/info:/1732077029710/Put/seqid=0 2024-11-20T04:30:44,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37765 is added to blk_1073741841_1024 (size=9270) 2024-11-20T04:30:44,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43021 is added to blk_1073741841_1024 (size=9270) 2024-11-20T04:30:44,458 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/default/TestLogRolling-testLogRollOnPipelineRestart/54222d285bb8ea88d3153652b2ff5574/.tmp/info/8d098f402b684f8cb86c73980f950bc5 2024-11-20T04:30:44,465 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/default/TestLogRolling-testLogRollOnPipelineRestart/54222d285bb8ea88d3153652b2ff5574/.tmp/info/8d098f402b684f8cb86c73980f950bc5 as hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/default/TestLogRolling-testLogRollOnPipelineRestart/54222d285bb8ea88d3153652b2ff5574/info/8d098f402b684f8cb86c73980f950bc5 2024-11-20T04:30:44,470 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/default/TestLogRolling-testLogRollOnPipelineRestart/54222d285bb8ea88d3153652b2ff5574/info/8d098f402b684f8cb86c73980f950bc5, entries=4, sequenceid=8, filesize=9.1 K 2024-11-20T04:30:44,471 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 54222d285bb8ea88d3153652b2ff5574 in 45ms, sequenceid=8, compaction requested=false 2024-11-20T04:30:44,471 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
54222d285bb8ea88d3153652b2ff5574: 2024-11-20T04:30:44,471 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-20T04:30:44,472 ERROR [FSHLog-0-hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4-prefix:c2a32e16c274,41203,1732077018566.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:44,472 WARN [FSHLog-0-hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4-prefix:c2a32e16c274,41203,1732077018566.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:44,472 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C41203%2C1732077018566.meta:.meta(num 1732077019451) roll requested 2024-11-20T04:30:44,472 INFO [regionserver/c2a32e16c274:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C41203%2C1732077018566.meta.1732077044472.meta 2024-11-20T04:30:44,477 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,477 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,477 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,478 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,478 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,478 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.meta.1732077019451.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.meta.1732077044472.meta 2024-11-20T04:30:44,478 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:44,478 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:44,478 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.meta.1732077019451.meta 2024-11-20T04:30:44,478 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37919:37919),(127.0.0.1/127.0.0.1:34625:34625)] 2024-11-20T04:30:44,479 DEBUG [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.meta.1732077019451.meta is not closed yet, will try archiving it next time 2024-11-20T04:30:44,479 WARN [IPC Server handler 0 on default port 39703 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.meta.1732077019451.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1026 for block blk_1073741834_1015 2024-11-20T04:30:44,479 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.meta.1732077019451.meta after 1ms 2024-11-20T04:30:44,494 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/.tmp/info/9648e5fb80804b558d0dafaf0027487a is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574./info:regioninfo/1732077019965/Put/seqid=0 2024-11-20T04:30:44,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37765 is added to blk_1073741843_1027 (size=7125) 2024-11-20T04:30:44,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43021 is added to blk_1073741843_1027 (size=7125) 2024-11-20T04:30:44,500 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/.tmp/info/9648e5fb80804b558d0dafaf0027487a 2024-11-20T04:30:44,519 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/.tmp/ns/f3b60330f40645308215aeea4268134d is 43, key is default/ns:d/1732077019496/Put/seqid=0 2024-11-20T04:30:44,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37765 is added to blk_1073741844_1028 (size=5153) 2024-11-20T04:30:44,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43021 is added to blk_1073741844_1028 (size=5153) 2024-11-20T04:30:44,525 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/.tmp/ns/f3b60330f40645308215aeea4268134d 2024-11-20T04:30:44,545 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/.tmp/table/cdefaed32d57402881b649a347bbd8ac is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732077019976/Put/seqid=0 2024-11-20T04:30:44,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37765 is added to blk_1073741845_1029 (size=5438) 2024-11-20T04:30:44,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43021 is added to blk_1073741845_1029 (size=5438) 2024-11-20T04:30:44,550 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/.tmp/table/cdefaed32d57402881b649a347bbd8ac 2024-11-20T04:30:44,557 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/.tmp/info/9648e5fb80804b558d0dafaf0027487a as hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/info/9648e5fb80804b558d0dafaf0027487a 2024-11-20T04:30:44,562 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/info/9648e5fb80804b558d0dafaf0027487a, entries=10, sequenceid=11, filesize=7.0 K 2024-11-20T04:30:44,562 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/.tmp/ns/f3b60330f40645308215aeea4268134d as hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/ns/f3b60330f40645308215aeea4268134d 2024-11-20T04:30:44,567 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/ns/f3b60330f40645308215aeea4268134d, entries=2, sequenceid=11, filesize=5.0 K 2024-11-20T04:30:44,568 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/.tmp/table/cdefaed32d57402881b649a347bbd8ac as hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/table/cdefaed32d57402881b649a347bbd8ac 2024-11-20T04:30:44,573 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/table/cdefaed32d57402881b649a347bbd8ac, entries=2, sequenceid=11, filesize=5.3 K 2024-11-20T04:30:44,574 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 103ms, sequenceid=11, compaction requested=false 2024-11-20T04:30:44,575 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-20T04:30:44,579 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T04:30:44,579 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
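[editor's note] The flush sequence above (each HFile written under .tmp, then "Committing .tmp/... as .../info|ns|table/...", then "Added ..., entries=..., sequenceid=11") follows the usual write-to-temp-then-rename pattern on HDFS. A minimal, self-contained sketch of that pattern with the plain Hadoop FileSystem API is below; the paths and file names are illustrative placeholders, not the ones HRegionFileSystem actually derives.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Illustrative only: write a file under a .tmp directory, then "commit" it by
 * renaming it into the final store directory, mirroring the "Committing ... as ..."
 * lines above. This is a generic sketch, not HBase's HRegionFileSystem code.
 */
public class TmpCommitSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);                       // fs.defaultFS from the config

    Path tmpFile = new Path("/demo/region/.tmp/info/flush-0001"); // hypothetical paths
    Path committed = new Path("/demo/region/info/flush-0001");

    // 1. Write the new file somewhere readers never look.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write("flushed cells would go here".getBytes(StandardCharsets.UTF_8));
    }

    // 2. Rename into place; within one HDFS filesystem this is a metadata-only operation.
    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmpFile, committed)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + committed);
    }
    System.out.println("Committed " + committed);
  }
}
```

Readers only ever see fully written files in the store directory, which is why the flush can proceed even while the old WAL writer is still stuck in lease recovery.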
2024-11-20T04:30:44,580 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:30:44,580 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:30:44,580 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:30:44,580 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
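[editor's note] The "Call stack: at ..." DEBUG dump above is simply the connection logging where close() was invoked from. Capturing the caller's stack for such a message needs nothing beyond Thread.getStackTrace(); a small stand-alone sketch (plain JDK, not the AsyncConnectionImpl code itself):

```java
/**
 * Illustrative sketch of logging the caller's stack the way the
 * "Call stack: at ..." DEBUG lines above do.
 */
public class CallStackSketch {

  static void logCloseCallStack() {
    StringBuilder sb = new StringBuilder("Call stack:");
    // Frame 0 is getStackTrace itself and frame 1 is this method; start at the caller.
    StackTraceElement[] frames = Thread.currentThread().getStackTrace();
    for (int i = 2; i < frames.length; i++) {
      sb.append(System.lineSeparator()).append("  at ").append(frames[i]);
    }
    System.out.println(sb);   // a real system would hand this to its DEBUG logger
  }

  public static void main(String[] args) {
    logCloseCallStack();
  }
}
```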
2024-11-20T04:30:44,580 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T04:30:44,580 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1794427886, stopped=false 2024-11-20T04:30:44,580 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c2a32e16c274,39381,1732077018505 2024-11-20T04:30:44,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:30:44,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:44,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:30:44,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:44,582 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T04:30:44,583 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:30:44,583 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:30:44,583 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
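[editor's note] The NodeDeleted events for /hbase/running above are how the region server learns that a cluster shutdown was requested: the master deletes the znode and every watcher fires. A minimal ZooKeeper-client sketch of watching a znode for deletion; the connect string mirrors the quorum in the log, and the path and timeout are placeholders (this is not HBase's ZKWatcher).

```java
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/**
 * Illustrative only: block until a "running" znode is deleted, the same signal
 * the ZKWatcher DEBUG lines above react to.
 */
public class RunningZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    final String runningZNode = "/hbase/running";           // placeholder path
    final CountDownLatch deleted = new CountDownLatch(1);

    Watcher watcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Watcher.Event.EventType.NodeDeleted
            && runningZNode.equals(event.getPath())) {
          deleted.countDown();                               // shutdown requested
        }
      }
    };

    ZooKeeper zk = new ZooKeeper("127.0.0.1:49423", 30_000, watcher);
    try {
      // exists(..., true) registers the default watcher whether or not the node exists yet.
      zk.exists(runningZNode, true);
      deleted.await();
      System.out.println("cluster shutdown requested: " + runningZNode + " deleted");
    } finally {
      zk.close();
    }
  }
}
```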
2024-11-20T04:30:44,583 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:30:44,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:30:44,583 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'c2a32e16c274,41203,1732077018566' ***** 2024-11-20T04:30:44,583 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T04:30:44,584 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T04:30:44,584 INFO [RS:0;c2a32e16c274:41203 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T04:30:44,584 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T04:30:44,584 INFO [RS:0;c2a32e16c274:41203 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T04:30:44,584 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(3091): Received CLOSE for 54222d285bb8ea88d3153652b2ff5574 2024-11-20T04:30:44,584 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(959): stopping server c2a32e16c274,41203,1732077018566 2024-11-20T04:30:44,584 INFO [RS:0;c2a32e16c274:41203 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:30:44,584 INFO [RS:0;c2a32e16c274:41203 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c2a32e16c274:41203. 2024-11-20T04:30:44,585 DEBUG [RS:0;c2a32e16c274:41203 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:30:44,584 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 54222d285bb8ea88d3153652b2ff5574, disabling compactions & flushes 2024-11-20T04:30:44,585 DEBUG [RS:0;c2a32e16c274:41203 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:30:44,585 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574. 2024-11-20T04:30:44,585 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574. 
2024-11-20T04:30:44,585 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T04:30:44,585 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574. after waiting 0 ms 2024-11-20T04:30:44,585 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T04:30:44,585 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T04:30:44,585 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574. 2024-11-20T04:30:44,585 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T04:30:44,585 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-20T04:30:44,585 DEBUG [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(1325): Online Regions={54222d285bb8ea88d3153652b2ff5574=TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574., 1588230740=hbase:meta,,1.1588230740} 2024-11-20T04:30:44,585 DEBUG [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 54222d285bb8ea88d3153652b2ff5574 2024-11-20T04:30:44,585 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T04:30:44,585 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T04:30:44,585 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T04:30:44,585 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T04:30:44,585 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T04:30:44,589 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/default/TestLogRolling-testLogRollOnPipelineRestart/54222d285bb8ea88d3153652b2ff5574/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-20T04:30:44,590 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-20T04:30:44,590 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574. 
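[editor's note] The "Waiting on 2 regions to close" / "Online Regions={...}" loop above is a simple shutdown pattern: keep a map of online regions, have close handlers remove their entry when done, and let the stopping thread poll until the map drains. A generic sketch with hypothetical names (not HRegionServer's actual fields or handlers):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

/**
 * Illustrative sketch of "waiting on N regions to close": close work runs on a
 * pool, each completion removes the region from an online map, and the stop
 * path polls until the map is empty.
 */
public class RegionCloseWaitSketch {
  private final Map<String, String> onlineRegions = new ConcurrentHashMap<>();
  private final ExecutorService closePool = Executors.newFixedThreadPool(2);

  void open(String encodedName, String description) {
    onlineRegions.put(encodedName, description);
  }

  void requestClose(String encodedName) {
    closePool.submit(() -> {
      // ... flushing and writing the region-close marker would happen here ...
      onlineRegions.remove(encodedName);
      System.out.println("Closed " + encodedName);
    });
  }

  void stop() throws InterruptedException {
    onlineRegions.keySet().forEach(this::requestClose);
    while (!onlineRegions.isEmpty()) {
      System.out.println("Waiting on " + onlineRegions.size()
          + " regions to close: " + onlineRegions.keySet());
      TimeUnit.MILLISECONDS.sleep(200);
    }
    closePool.shutdown();
    closePool.awaitTermination(10, TimeUnit.SECONDS);
  }

  public static void main(String[] args) throws InterruptedException {
    RegionCloseWaitSketch rs = new RegionCloseWaitSketch();
    rs.open("1588230740", "hbase:meta");                                 // names taken from the log
    rs.open("54222d285bb8ea88d3153652b2ff5574", "TestLogRolling region");
    rs.stop();
  }
}
```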
2024-11-20T04:30:44,590 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 54222d285bb8ea88d3153652b2ff5574: Waiting for close lock at 1732077044584Running coprocessor pre-close hooks at 1732077044584Disabling compacts and flushes for region at 1732077044584Disabling writes for close at 1732077044585 (+1 ms)Writing region close event to WAL at 1732077044585Running coprocessor post-close hooks at 1732077044590 (+5 ms)Closed at 1732077044590 2024-11-20T04:30:44,590 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732077019600.54222d285bb8ea88d3153652b2ff5574. 2024-11-20T04:30:44,591 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T04:30:44,591 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T04:30:44,591 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732077044585Running coprocessor pre-close hooks at 1732077044585Disabling compacts and flushes for region at 1732077044585Disabling writes for close at 1732077044585Writing region close event to WAL at 1732077044587 (+2 ms)Running coprocessor post-close hooks at 1732077044590 (+3 ms)Closed at 1732077044591 (+1 ms) 2024-11-20T04:30:44,591 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T04:30:44,785 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(976): stopping server c2a32e16c274,41203,1732077018566; all regions closed. 2024-11-20T04:30:44,786 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,786 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,786 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,786 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,786 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:44,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43021 is added to blk_1073741842_1025 (size=825) 2024-11-20T04:30:44,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37765 is added to blk_1073741842_1025 (size=825) 2024-11-20T04:30:44,929 INFO [regionserver/c2a32e16c274:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T04:30:44,929 INFO [regionserver/c2a32e16c274:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T04:30:44,933 INFO [regionserver/c2a32e16c274:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:30:45,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:45,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:46,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:46,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:47,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:47,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:48,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:48,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:48,454 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
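[editor's note] Each "Failed invocation ... java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" block above comes from RecoverLeaseFSUtils calling DistributedFileSystem.isFileClosed reflectively; the informative error is always the cause, not the wrapper. A small sketch of that reflective call and of unwrapping the cause for a readable message, assuming an already-open DistributedFileSystem handle:

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/**
 * Illustrative sketch: invoke isFileClosed(Path) via reflection (as
 * compatibility code does) and unwrap InvocationTargetException so the log
 * shows the real cause, e.g. "Filesystem closed", rather than the wrapper.
 */
public final class IsFileClosedViaReflection {

  static boolean isFileClosed(DistributedFileSystem dfs, Path file) {
    try {
      Method m = DistributedFileSystem.class.getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(dfs, file);
    } catch (InvocationTargetException e) {
      // The reflective wrapper has no message of its own; the underlying
      // IOException ("Filesystem closed" in the log above) is the cause.
      System.err.println("isFileClosed failed for " + file + ": " + e.getCause());
      return false;
    } catch (ReflectiveOperationException e) {
      // Method missing on this Hadoop version, or illegal access.
      System.err.println("isFileClosed not available: " + e);
      return false;
    }
  }

  private IsFileClosedViaReflection() {
  }
}
```

In the log the underlying DFSClient has already been closed by the minicluster teardown, so every poll fails the same way until the other WAL's recovery path gives up.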
2024-11-20T04:30:48,480 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.meta.1732077019451.meta after 4002ms 2024-11-20T04:30:48,480 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/WALs/c2a32e16c274,41203,1732077018566/c2a32e16c274%2C41203%2C1732077018566.meta.1732077019451.meta to hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/oldWALs/c2a32e16c274%2C41203%2C1732077018566.meta.1732077019451.meta 2024-11-20T04:30:48,483 DEBUG [RS:0;c2a32e16c274:41203 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/oldWALs 2024-11-20T04:30:48,483 INFO [RS:0;c2a32e16c274:41203 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C41203%2C1732077018566.meta:.meta(num 1732077044472) 2024-11-20T04:30:48,483 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,484 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,484 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,484 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,484 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43021 is added to blk_1073741840_1023 (size=1162) 2024-11-20T04:30:48,486 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T04:30:48,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37765 is added to blk_1073741840_1023 (size=1162) 2024-11-20T04:30:48,490 DEBUG [RS:0;c2a32e16c274:41203 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/oldWALs 2024-11-20T04:30:48,491 INFO [RS:0;c2a32e16c274:41203 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C41203%2C1732077018566:(num 1732077044427) 2024-11-20T04:30:48,491 DEBUG [RS:0;c2a32e16c274:41203 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:30:48,491 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:30:48,491 INFO [RS:0;c2a32e16c274:41203 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:30:48,491 INFO [RS:0;c2a32e16c274:41203 {}] hbase.ChoreService(370): Chore service for: regionserver/c2a32e16c274:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-20T04:30:48,491 INFO [RS:0;c2a32e16c274:41203 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:30:48,491 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
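[editor's note] The pair of messages "Failed to recover lease, attempt=0 ... after 1ms" and "Recovered lease, attempt=1 ... after 4002ms" above reflect the usual recover-then-poll loop on an HDFS file whose writer died. A simplified sketch over the public DistributedFileSystem API; the retry count and backoff here are made up, not the values RecoverLeaseFSUtils uses.

```java
import java.io.IOException;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/**
 * Illustrative sketch of WAL-style lease recovery: ask the NameNode to recover
 * the lease, then poll until the file is reported closed.
 */
public final class LeaseRecoverySketch {

  static void recoverLease(DistributedFileSystem dfs, Path walFile)
      throws IOException, InterruptedException {
    long start = System.currentTimeMillis();
    for (int attempt = 0; attempt < 30; attempt++) {
      // recoverLease returns true once the file is closed and the lease released.
      if (dfs.recoverLease(walFile) || dfs.isFileClosed(walFile)) {
        System.out.println("Recovered lease, attempt=" + attempt + " on file=" + walFile
            + " after " + (System.currentTimeMillis() - start) + "ms");
        return;
      }
      System.out.println("Failed to recover lease, attempt=" + attempt + " on file=" + walFile
          + " after " + (System.currentTimeMillis() - start) + "ms");
      TimeUnit.SECONDS.sleep(4);   // block recovery on the datanodes takes a few seconds
    }
    throw new IOException("Could not recover lease on " + walFile);
  }

  private LeaseRecoverySketch() {
  }
}
```

The roughly four-second gap between attempt=0 and attempt=1 in the log is the NameNode driving block recovery (RecoveryId = 1026 above) before the file can be closed and archived to oldWALs.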
2024-11-20T04:30:48,491 INFO [RS:0;c2a32e16c274:41203 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41203 2024-11-20T04:30:48,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c2a32e16c274,41203,1732077018566 2024-11-20T04:30:48,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:30:48,494 INFO [RS:0;c2a32e16c274:41203 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T04:30:48,494 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c2a32e16c274,41203,1732077018566] 2024-11-20T04:30:48,498 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c2a32e16c274,41203,1732077018566 already deleted, retry=false 2024-11-20T04:30:48,498 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c2a32e16c274,41203,1732077018566 expired; onlineServers=0 2024-11-20T04:30:48,498 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c2a32e16c274,39381,1732077018505' ***** 2024-11-20T04:30:48,498 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T04:30:48,498 INFO [M:0;c2a32e16c274:39381 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:30:48,498 INFO [M:0;c2a32e16c274:39381 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:30:48,498 DEBUG [M:0;c2a32e16c274:39381 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T04:30:48,498 DEBUG [M:0;c2a32e16c274:39381 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T04:30:48,498 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
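[editor's note] The "Chore service for: ... had [ScheduledChore name=..., period=..., unit=MILLISECONDS] on shutdown" lines above describe periodic background tasks being cancelled. A generic analogue built on a ScheduledExecutorService (not HBase's ChoreService API), showing the schedule-then-shutdown lifecycle; the periods are placeholders.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/**
 * Illustrative sketch of a "chore service": periodic tasks scheduled at a fixed
 * period and cancelled on shutdown, roughly what the ChoreService lines report.
 */
public class ChoreServiceSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newScheduledThreadPool(2);

    // Two chores, analogous to CompactionChecker / MemstoreFlusherChore.
    chores.scheduleAtFixedRate(
        () -> System.out.println("compaction check"), 0, 1, TimeUnit.SECONDS);
    chores.scheduleAtFixedRate(
        () -> System.out.println("memstore flush check"), 0, 1, TimeUnit.SECONDS);

    TimeUnit.SECONDS.sleep(3);       // let the chores run a few times

    // Shutdown: stop accepting work, interrupt anything in flight, wait briefly.
    chores.shutdownNow();
    if (!chores.awaitTermination(5, TimeUnit.SECONDS)) {
      System.err.println("chores did not stop in time");
    }
    System.out.println("Chore service stopped");
  }
}
```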
2024-11-20T04:30:48,498 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732077018830 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732077018830,5,FailOnTimeoutGroup] 2024-11-20T04:30:48,498 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732077018830 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732077018830,5,FailOnTimeoutGroup] 2024-11-20T04:30:48,498 INFO [M:0;c2a32e16c274:39381 {}] hbase.ChoreService(370): Chore service for: master/c2a32e16c274:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T04:30:48,498 INFO [M:0;c2a32e16c274:39381 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:30:48,498 DEBUG [M:0;c2a32e16c274:39381 {}] master.HMaster(1795): Stopping service threads 2024-11-20T04:30:48,498 INFO [M:0;c2a32e16c274:39381 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T04:30:48,499 INFO [M:0;c2a32e16c274:39381 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T04:30:48,499 INFO [M:0;c2a32e16c274:39381 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T04:30:48,499 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T04:30:48,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T04:30:48,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:48,500 DEBUG [M:0;c2a32e16c274:39381 {}] zookeeper.ZKUtil(347): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T04:30:48,500 WARN [M:0;c2a32e16c274:39381 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T04:30:48,500 INFO [M:0;c2a32e16c274:39381 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/.lastflushedseqids 2024-11-20T04:30:48,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37765 is added to blk_1073741846_1030 (size=130) 2024-11-20T04:30:48,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43021 is added to blk_1073741846_1030 (size=130) 2024-11-20T04:30:48,506 INFO [M:0;c2a32e16c274:39381 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T04:30:48,506 INFO [M:0;c2a32e16c274:39381 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T04:30:48,506 DEBUG [M:0;c2a32e16c274:39381 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T04:30:48,506 INFO [M:0;c2a32e16c274:39381 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:30:48,506 DEBUG [M:0;c2a32e16c274:39381 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:30:48,506 DEBUG [M:0;c2a32e16c274:39381 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T04:30:48,506 DEBUG [M:0;c2a32e16c274:39381 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:30:48,506 INFO [M:0;c2a32e16c274:39381 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-20T04:30:48,507 ERROR [FSHLog-0-hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData-prefix:c2a32e16c274,39381,1732077018505 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:48,507 WARN [FSHLog-0-hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData-prefix:c2a32e16c274,39381,1732077018505 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
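[editor's note] The pattern above, where appendAndSync throws "All datanodes ... are bad", the roller logs "roll requested", and a new writer is created on a fresh pipeline, is the standard roll-on-error recovery for a write-ahead log. A stripped-down sketch of that idea over the plain FileSystem API; the file naming and error handling are simplified and hypothetical, not FSHLog/AbstractFSWAL.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Illustrative sketch of "roll the log on a failed append": if an append or
 * flush fails (e.g. all datanodes in the pipeline are bad), abandon the old
 * writer and start a new file, leaving the old one to lease recovery.
 */
public class RollOnErrorSketch {
  private final FileSystem fs;
  private final Path walDir;
  private FSDataOutputStream writer;

  RollOnErrorSketch(FileSystem fs, Path walDir) throws IOException {
    this.fs = fs;
    this.walDir = walDir;
    this.writer = newWriter();
  }

  private FSDataOutputStream newWriter() throws IOException {
    // Timestamped file name, similar in spirit to ...1732077048507 in the log.
    return fs.create(new Path(walDir, "wal." + System.currentTimeMillis()), true);
  }

  void append(String entry) throws IOException {
    try {
      writer.write(entry.getBytes(StandardCharsets.UTF_8));
      writer.hflush();                     // make the edit durable on the pipeline
    } catch (IOException e) {
      System.err.println("append failed, rolling writer: " + e.getMessage());
      try {
        writer.close();                    // may also fail; old file then needs lease recovery
      } catch (IOException closeError) {
        System.err.println("close old writer failed: " + closeError.getMessage());
      }
      writer = newWriter();                // new file, new datanode pipeline
      writer.write(entry.getBytes(StandardCharsets.UTF_8));
      writer.hflush();
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    RollOnErrorSketch wal = new RollOnErrorSketch(fs, new Path("/demo/WALs"));
    wal.append("edit-1");
  }
}
```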
2024-11-20T04:30:48,507 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog c2a32e16c274%2C39381%2C1732077018505:(num 1732077018700) roll requested 2024-11-20T04:30:48,507 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C39381%2C1732077018505.1732077048507 2024-11-20T04:30:48,513 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,513 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,513 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,513 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,513 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,513 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/WALs/c2a32e16c274,39381,1732077018505/c2a32e16c274%2C39381%2C1732077018505.1732077018700 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/WALs/c2a32e16c274,39381,1732077018505/c2a32e16c274%2C39381%2C1732077018505.1732077048507 2024-11-20T04:30:48,513 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:48,514 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38659,DS-78289be3-b11f-4030-9499-2012da7b3c24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T04:30:48,514 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/WALs/c2a32e16c274,39381,1732077018505/c2a32e16c274%2C39381%2C1732077018505.1732077018700 2024-11-20T04:30:48,514 WARN [IPC Server handler 3 on default port 39703 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/WALs/c2a32e16c274,39381,1732077018505/c2a32e16c274%2C39381%2C1732077018505.1732077018700 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-20T04:30:48,514 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/WALs/c2a32e16c274,39381,1732077018505/c2a32e16c274%2C39381%2C1732077018505.1732077018700 after 0ms 2024-11-20T04:30:48,517 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37919:37919),(127.0.0.1/127.0.0.1:34625:34625)] 2024-11-20T04:30:48,517 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/WALs/c2a32e16c274,39381,1732077018505/c2a32e16c274%2C39381%2C1732077018505.1732077018700 is not closed yet, will try archiving it next time 2024-11-20T04:30:48,533 DEBUG [M:0;c2a32e16c274:39381 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cbcfd8e34f574545b87573e877d2fde8 is 82, key is hbase:meta,,1/info:regioninfo/1732077019479/Put/seqid=0 2024-11-20T04:30:48,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43021 is added to blk_1073741848_1033 (size=5672) 2024-11-20T04:30:48,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37765 is added to blk_1073741848_1033 (size=5672) 2024-11-20T04:30:48,538 INFO [M:0;c2a32e16c274:39381 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cbcfd8e34f574545b87573e877d2fde8 2024-11-20T04:30:48,558 DEBUG [M:0;c2a32e16c274:39381 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2281ea87184b4cdf8be04e653088a4aa is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732077019980/Put/seqid=0 2024-11-20T04:30:48,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37765 is added to blk_1073741849_1034 (size=6119) 2024-11-20T04:30:48,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43021 is added to blk_1073741849_1034 (size=6119) 2024-11-20T04:30:48,564 INFO [M:0;c2a32e16c274:39381 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2281ea87184b4cdf8be04e653088a4aa 2024-11-20T04:30:48,584 DEBUG [M:0;c2a32e16c274:39381 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bd342c2c610d493f928d61addb45e746 is 69, key is c2a32e16c274,41203,1732077018566/rs:state/1732077018913/Put/seqid=0 2024-11-20T04:30:48,589 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37765 is added to blk_1073741850_1035 (size=5156) 2024-11-20T04:30:48,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43021 is added to blk_1073741850_1035 (size=5156) 2024-11-20T04:30:48,589 INFO [M:0;c2a32e16c274:39381 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bd342c2c610d493f928d61addb45e746 2024-11-20T04:30:48,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:30:48,597 INFO [RS:0;c2a32e16c274:41203 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T04:30:48,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41203-0x10133a3c8080001, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:30:48,597 INFO [RS:0;c2a32e16c274:41203 {}] regionserver.HRegionServer(1031): Exiting; stopping=c2a32e16c274,41203,1732077018566; zookeeper connection closed. 2024-11-20T04:30:48,597 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@a6bb9ae {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@a6bb9ae 2024-11-20T04:30:48,598 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T04:30:48,616 DEBUG [M:0;c2a32e16c274:39381 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b5d6fd4eee2e47d89c7e309db8ae77c1 is 52, key is load_balancer_on/state:d/1732077019596/Put/seqid=0 2024-11-20T04:30:48,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43021 is added to blk_1073741851_1036 (size=5056) 2024-11-20T04:30:48,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37765 is added to blk_1073741851_1036 (size=5056) 2024-11-20T04:30:48,623 INFO [M:0;c2a32e16c274:39381 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b5d6fd4eee2e47d89c7e309db8ae77c1 2024-11-20T04:30:48,629 DEBUG [M:0;c2a32e16c274:39381 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cbcfd8e34f574545b87573e877d2fde8 as hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cbcfd8e34f574545b87573e877d2fde8 2024-11-20T04:30:48,636 INFO [M:0;c2a32e16c274:39381 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cbcfd8e34f574545b87573e877d2fde8, entries=8, sequenceid=56, filesize=5.5 K 2024-11-20T04:30:48,637 DEBUG [M:0;c2a32e16c274:39381 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2281ea87184b4cdf8be04e653088a4aa as hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2281ea87184b4cdf8be04e653088a4aa 2024-11-20T04:30:48,644 INFO [M:0;c2a32e16c274:39381 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2281ea87184b4cdf8be04e653088a4aa, entries=6, sequenceid=56, filesize=6.0 K 2024-11-20T04:30:48,645 DEBUG [M:0;c2a32e16c274:39381 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bd342c2c610d493f928d61addb45e746 as hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bd342c2c610d493f928d61addb45e746 2024-11-20T04:30:48,651 INFO [M:0;c2a32e16c274:39381 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bd342c2c610d493f928d61addb45e746, entries=1, sequenceid=56, filesize=5.0 K 2024-11-20T04:30:48,652 DEBUG [M:0;c2a32e16c274:39381 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b5d6fd4eee2e47d89c7e309db8ae77c1 as hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b5d6fd4eee2e47d89c7e309db8ae77c1 2024-11-20T04:30:48,658 INFO [M:0;c2a32e16c274:39381 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b5d6fd4eee2e47d89c7e309db8ae77c1, entries=1, sequenceid=56, filesize=4.9 K 2024-11-20T04:30:48,659 INFO [M:0;c2a32e16c274:39381 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 153ms, sequenceid=56, compaction requested=false 2024-11-20T04:30:48,661 INFO [M:0;c2a32e16c274:39381 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T04:30:48,661 DEBUG [M:0;c2a32e16c274:39381 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732077048506Disabling compacts and flushes for region at 1732077048506Disabling writes for close at 1732077048506Obtaining lock to block concurrent updates at 1732077048506Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732077048506Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732077048507 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732077048518 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732077048518Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732077048532 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732077048532Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732077048543 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732077048558 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732077048558Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732077048569 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732077048583 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732077048583Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732077048595 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732077048616 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732077048616Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ce41cc4: reopening flushed file at 1732077048629 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43016a1e: reopening flushed file at 1732077048636 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@625378f1: reopening flushed file at 1732077048644 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f3ed45a: reopening flushed file at 1732077048651 (+7 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 153ms, sequenceid=56, compaction requested=false at 1732077048659 (+8 ms)Writing region close event to WAL at 1732077048661 (+2 ms)Closed at 1732077048661 2024-11-20T04:30:48,661 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,661 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,661 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,661 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,662 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:30:48,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43021 is added to blk_1073741847_1031 (size=757) 2024-11-20T04:30:48,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37765 is added to blk_1073741847_1031 (size=757) 2024-11-20T04:30:49,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:49,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:49,591 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:49,591 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:49,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:49,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:49,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:49,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:49,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:49,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:49,609 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:49,609 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:49,609 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:49,611 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:49,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:49,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,117 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T04:30:50,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,119 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,119 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,134 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,134 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,140 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:50,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:50,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:51,223 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T04:30:51,223 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T04:30:51,224 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T04:30:51,224 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-20T04:30:51,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:51,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:51,453 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-20T04:30:52,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:30:52,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-20T04:30:52,515 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/WALs/c2a32e16c274,39381,1732077018505/c2a32e16c274%2C39381%2C1732077018505.1732077018700 after 4001ms
2024-11-20T04:30:52,516 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/WALs/c2a32e16c274,39381,1732077018505/c2a32e16c274%2C39381%2C1732077018505.1732077018700 to hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/oldWALs/c2a32e16c274%2C39381%2C1732077018505.1732077018700
2024-11-20T04:30:52,518 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/MasterData/oldWALs/c2a32e16c274%2C39381%2C1732077018505.1732077018700 to hdfs://localhost:39703/user/jenkins/test-data/74e07189-ebc7-69d7-9e20-3dd8fb3c08c4/oldWALs/c2a32e16c274%2C39381%2C1732077018505.1732077018700$masterlocalwal$
2024-11-20T04:30:52,518 INFO [M:0;c2a32e16c274:39381 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-20T04:30:52,518 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-20T04:30:52,518 INFO [M:0;c2a32e16c274:39381 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39381
2024-11-20T04:30:52,519 INFO [M:0;c2a32e16c274:39381 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-20T04:30:52,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T04:30:52,621 INFO [M:0;c2a32e16c274:39381 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-20T04:30:52,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39381-0x10133a3c8080000, quorum=127.0.0.1:49423, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T04:30:52,623 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@20aef44d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T04:30:52,623 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f2f2023{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T04:30:52,623 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T04:30:52,624 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@787041af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T04:30:52,624 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a9d36bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir/,STOPPED}
2024-11-20T04:30:52,625 WARN [BP-524018934-172.17.0.2-1732077017674 heartbeating to localhost/127.0.0.1:39703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T04:30:52,625 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T04:30:52,625 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T04:30:52,625 WARN [BP-524018934-172.17.0.2-1732077017674 heartbeating to localhost/127.0.0.1:39703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-524018934-172.17.0.2-1732077017674 (Datanode Uuid e29a8ceb-33a1-47a2-aa99-54fe9f459a9f) service to localhost/127.0.0.1:39703
2024-11-20T04:30:52,626 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data3/current/BP-524018934-172.17.0.2-1732077017674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T04:30:52,626 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data4/current/BP-524018934-172.17.0.2-1732077017674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T04:30:52,626 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T04:30:52,628 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6a90fb45{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T04:30:52,629 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1dad3af2{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T04:30:52,629 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T04:30:52,629 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5aaed393{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T04:30:52,629 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2392cae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir/,STOPPED}
2024-11-20T04:30:52,630 WARN [BP-524018934-172.17.0.2-1732077017674 heartbeating to localhost/127.0.0.1:39703 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T04:30:52,630 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T04:30:52,630 WARN [BP-524018934-172.17.0.2-1732077017674 heartbeating to localhost/127.0.0.1:39703 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-524018934-172.17.0.2-1732077017674 (Datanode Uuid c4a40280-f54d-4d23-9f7a-e14c3e8341a9) service to localhost/127.0.0.1:39703 2024-11-20T04:30:52,630 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:30:52,631 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data1/current/BP-524018934-172.17.0.2-1732077017674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:30:52,631 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/cluster_057a4d28-bb12-c8fa-eab9-b260b78fe493/data/data2/current/BP-524018934-172.17.0.2-1732077017674 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:30:52,631 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:30:52,637 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3fab170c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T04:30:52,638 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d2191b9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:30:52,638 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:30:52,638 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77037455{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:30:52,638 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@444db7a8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir/,STOPPED} 2024-11-20T04:30:52,644 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T04:30:52,661 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T04:30:52,670 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 153) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39703 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39703 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39703 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39703 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39703 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:39703 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39703 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39703 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=132 (was 210), ProcessCount=11 (was 11), AvailableMemoryMB=7175 (was 7566) 2024-11-20T04:30:52,678 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=132, ProcessCount=11, AvailableMemoryMB=7175 2024-11-20T04:30:52,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T04:30:52,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.log.dir so I do NOT create it in target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333 2024-11-20T04:30:52,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5ffdf4c9-43eb-090d-aad3-c17bd5c1de2c/hadoop.tmp.dir so I do NOT create it in target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333 2024-11-20T04:30:52,678 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/cluster_277009b9-144d-c8e4-dc37-d1137d962448, deleteOnExit=true 2024-11-20T04:30:52,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/test.cache.data in system properties and HBase conf 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/hadoop.log.dir in system properties and HBase conf 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T04:30:52,679 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T04:30:52,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T04:30:52,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/nfs.dump.dir in system properties and HBase conf 2024-11-20T04:30:52,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/java.io.tmpdir in system properties and HBase conf 2024-11-20T04:30:52,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T04:30:52,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T04:30:52,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T04:30:52,693 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T04:30:52,771 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:30:52,775 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:30:52,776 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:30:52,776 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:30:52,776 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:30:52,776 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:30:52,777 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b5b6aa1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:30:52,777 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38f5461{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:30:52,892 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1fc56883{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/java.io.tmpdir/jetty-localhost-41609-hadoop-hdfs-3_4_1-tests_jar-_-any-18354827399100740940/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T04:30:52,893 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7934466e{HTTP/1.1, (http/1.1)}{localhost:41609} 2024-11-20T04:30:52,893 INFO [Time-limited test {}] server.Server(415): Started @186972ms 2024-11-20T04:30:52,906 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T04:30:52,961 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:30:52,965 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:30:52,965 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:30:52,965 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:30:52,965 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T04:30:52,966 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@719add8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:30:52,966 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fc981fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:30:53,083 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c41fb6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/java.io.tmpdir/jetty-localhost-43103-hadoop-hdfs-3_4_1-tests_jar-_-any-4860003581896611341/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:30:53,084 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@206f042f{HTTP/1.1, (http/1.1)}{localhost:43103} 2024-11-20T04:30:53,084 INFO [Time-limited test {}] server.Server(415): Started @187162ms 2024-11-20T04:30:53,085 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T04:30:53,114 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:30:53,117 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:30:53,118 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:30:53,118 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:30:53,118 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:30:53,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c13156e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:30:53,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5127cbf0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:30:53,177 WARN [Thread-1634 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/cluster_277009b9-144d-c8e4-dc37-d1137d962448/data/data1/current/BP-1214701570-172.17.0.2-1732077052710/current, will proceed with Du for space computation calculation, 2024-11-20T04:30:53,177 WARN [Thread-1635 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/cluster_277009b9-144d-c8e4-dc37-d1137d962448/data/data2/current/BP-1214701570-172.17.0.2-1732077052710/current, will proceed with Du for space computation calculation, 2024-11-20T04:30:53,200 WARN [Thread-1613 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T04:30:53,203 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfcb7abb6d0a7a8e2 with lease ID 0x293e28204c885774: Processing first storage report for DS-bdb29d48-0f43-4d37-bf77-29f89b1773e4 from datanode DatanodeRegistration(127.0.0.1:44143, datanodeUuid=751008ab-b53c-44c4-bc5a-0240c12ebb21, infoPort=43897, infoSecurePort=0, ipcPort=43129, storageInfo=lv=-57;cid=testClusterID;nsid=1769452805;c=1732077052710) 2024-11-20T04:30:53,203 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfcb7abb6d0a7a8e2 with lease ID 0x293e28204c885774: from storage DS-bdb29d48-0f43-4d37-bf77-29f89b1773e4 node DatanodeRegistration(127.0.0.1:44143, datanodeUuid=751008ab-b53c-44c4-bc5a-0240c12ebb21, infoPort=43897, infoSecurePort=0, ipcPort=43129, storageInfo=lv=-57;cid=testClusterID;nsid=1769452805;c=1732077052710), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:53,203 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfcb7abb6d0a7a8e2 with lease ID 0x293e28204c885774: Processing first storage report for DS-7ac81414-339e-49a6-a6bf-4a7b1fceef3a from datanode DatanodeRegistration(127.0.0.1:44143, datanodeUuid=751008ab-b53c-44c4-bc5a-0240c12ebb21, infoPort=43897, infoSecurePort=0, ipcPort=43129, storageInfo=lv=-57;cid=testClusterID;nsid=1769452805;c=1732077052710) 2024-11-20T04:30:53,203 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfcb7abb6d0a7a8e2 with lease ID 0x293e28204c885774: from storage DS-7ac81414-339e-49a6-a6bf-4a7b1fceef3a node DatanodeRegistration(127.0.0.1:44143, datanodeUuid=751008ab-b53c-44c4-bc5a-0240c12ebb21, infoPort=43897, infoSecurePort=0, ipcPort=43129, storageInfo=lv=-57;cid=testClusterID;nsid=1769452805;c=1732077052710), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:53,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b423220{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/java.io.tmpdir/jetty-localhost-45421-hadoop-hdfs-3_4_1-tests_jar-_-any-472888280225294324/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:30:53,242 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5aa33ca4{HTTP/1.1, (http/1.1)}{localhost:45421} 2024-11-20T04:30:53,242 INFO [Time-limited test {}] server.Server(415): Started @187320ms 2024-11-20T04:30:53,243 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
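
[Editor's note] The entries above trace HBaseTestingUtil bringing up a mini DFS (two datanodes plus the namenode web apps) for regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling. As a rough illustration only, the sketch below shows how a test could request the same topology; it is not taken from this log, and the StartMiniClusterOption builder method names are inferred from the option fields printed at 04:30:52,678, so treat them as assumptions that may differ by HBase version.

    // Hypothetical sketch: starting a mini cluster like the one in the log above.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // numMasters=1 in the logged option
            .numRegionServers(1)  // numRegionServers=1
            .numDataNodes(2)      // numDataNodes=2 (the two datanodes seen above)
            .numZkServers(1)      // numZkServers=1
            .build();
        util.startMiniCluster(option);   // starts DFS, ZooKeeper and HBase
        try {
          // test code would run against util.getConnection() here
        } finally {
          util.shutdownMiniCluster();    // tear down so the ResourceChecker sees no leaks
        }
      }
    }
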
2024-11-20T04:30:53,337 WARN [Thread-1660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/cluster_277009b9-144d-c8e4-dc37-d1137d962448/data/data3/current/BP-1214701570-172.17.0.2-1732077052710/current, will proceed with Du for space computation calculation, 2024-11-20T04:30:53,337 WARN [Thread-1661 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/cluster_277009b9-144d-c8e4-dc37-d1137d962448/data/data4/current/BP-1214701570-172.17.0.2-1732077052710/current, will proceed with Du for space computation calculation, 2024-11-20T04:30:53,355 WARN [Thread-1649 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T04:30:53,357 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc69910a289d06023 with lease ID 0x293e28204c885775: Processing first storage report for DS-62f911ec-cfaa-4cf4-a8af-72c2381e983e from datanode DatanodeRegistration(127.0.0.1:45793, datanodeUuid=70bd8e52-49d4-4a85-9af8-03d310d41b72, infoPort=39125, infoSecurePort=0, ipcPort=39493, storageInfo=lv=-57;cid=testClusterID;nsid=1769452805;c=1732077052710) 2024-11-20T04:30:53,357 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc69910a289d06023 with lease ID 0x293e28204c885775: from storage DS-62f911ec-cfaa-4cf4-a8af-72c2381e983e node DatanodeRegistration(127.0.0.1:45793, datanodeUuid=70bd8e52-49d4-4a85-9af8-03d310d41b72, infoPort=39125, infoSecurePort=0, ipcPort=39493, storageInfo=lv=-57;cid=testClusterID;nsid=1769452805;c=1732077052710), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:53,357 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc69910a289d06023 with lease ID 0x293e28204c885775: Processing first storage report for DS-24e47812-0194-4185-9f16-3892a74feac9 from datanode DatanodeRegistration(127.0.0.1:45793, datanodeUuid=70bd8e52-49d4-4a85-9af8-03d310d41b72, infoPort=39125, infoSecurePort=0, ipcPort=39493, storageInfo=lv=-57;cid=testClusterID;nsid=1769452805;c=1732077052710) 2024-11-20T04:30:53,357 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc69910a289d06023 with lease ID 0x293e28204c885775: from storage DS-24e47812-0194-4185-9f16-3892a74feac9 node DatanodeRegistration(127.0.0.1:45793, datanodeUuid=70bd8e52-49d4-4a85-9af8-03d310d41b72, infoPort=39125, infoSecurePort=0, ipcPort=39493, storageInfo=lv=-57;cid=testClusterID;nsid=1769452805;c=1732077052710), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:30:53,366 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333 2024-11-20T04:30:53,368 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/cluster_277009b9-144d-c8e4-dc37-d1137d962448/zookeeper_0, clientPort=63088, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/cluster_277009b9-144d-c8e4-dc37-d1137d962448/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/cluster_277009b9-144d-c8e4-dc37-d1137d962448/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T04:30:53,369 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63088 2024-11-20T04:30:53,370 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:53,371 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:53,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:30:53,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:30:53,381 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca with version=8 2024-11-20T04:30:53,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/hbase-staging 2024-11-20T04:30:53,383 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:30:53,383 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:30:53,383 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:30:53,383 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:30:53,383 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:30:53,383 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:30:53,383 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T04:30:53,383 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:30:53,384 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44409 2024-11-20T04:30:53,385 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44409 connecting to ZooKeeper ensemble=127.0.0.1:63088 2024-11-20T04:30:53,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:53,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:444090x0, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:30:53,391 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44409-0x10133a450470000 connected 2024-11-20T04:30:53,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:53,408 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:53,410 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:53,412 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:30:53,412 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca, hbase.cluster.distributed=false 2024-11-20T04:30:53,414 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:30:53,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44409 2024-11-20T04:30:53,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44409 2024-11-20T04:30:53,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44409 2024-11-20T04:30:53,415 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44409 2024-11-20T04:30:53,415 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44409 2024-11-20T04:30:53,430 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:30:53,430 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:30:53,430 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:30:53,430 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:30:53,430 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:30:53,430 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:30:53,431 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T04:30:53,431 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:30:53,431 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40417 2024-11-20T04:30:53,433 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40417 connecting to ZooKeeper ensemble=127.0.0.1:63088 2024-11-20T04:30:53,433 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:53,435 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:53,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:404170x0, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:30:53,439 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:404170x0, quorum=127.0.0.1:63088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:30:53,440 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40417-0x10133a450470001 connected 2024-11-20T04:30:53,440 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T04:30:53,440 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T04:30:53,441 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
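
[Editor's note] Several DEBUG lines above read "Set watcher on znode that does not yet exist" followed later by NodeCreated/NodeChildrenChanged events. The minimal sketch below, using the plain Apache ZooKeeper client rather than HBase's ZKUtil/ZKWatcher, shows the underlying mechanism: exists() on a missing path returns null but still registers the watch, so a later create fires the event seen in the log. The connect string reuses the mini-cluster client port 63088 purely for illustration.

    // Illustrative sketch of "watch a znode that does not yet exist".
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZkWatchSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("Received ZooKeeper Event, type=" + event.getType()
                + ", path=" + event.getPath());
        ZooKeeper zk = new ZooKeeper("127.0.0.1:63088", 30000, watcher);
        // Returns null because /hbase/master is absent, but the watch is still set,
        // so creating the znode later triggers a NodeCreated event on the watcher.
        Stat stat = zk.exists("/hbase/master", watcher);
        System.out.println("/hbase/master exists now: " + (stat != null));
        zk.close();
      }
    }
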
2024-11-20T04:30:53,442 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:30:53,442 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40417 2024-11-20T04:30:53,442 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40417 2024-11-20T04:30:53,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40417 2024-11-20T04:30:53,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40417 2024-11-20T04:30:53,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40417 2024-11-20T04:30:53,455 DEBUG [M:0;c2a32e16c274:44409 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c2a32e16c274:44409 2024-11-20T04:30:53,456 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c2a32e16c274,44409,1732077053382 2024-11-20T04:30:53,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:30:53,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:30:53,458 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c2a32e16c274,44409,1732077053382 2024-11-20T04:30:53,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T04:30:53,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:53,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:53,461 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T04:30:53,461 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c2a32e16c274,44409,1732077053382 from backup master directory 2024-11-20T04:30:53,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c2a32e16c274,44409,1732077053382 2024-11-20T04:30:53,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:30:53,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:30:53,463 WARN [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T04:30:53,463 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c2a32e16c274,44409,1732077053382 2024-11-20T04:30:53,467 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/hbase.id] with ID: a000b638-838d-4db7-9809-f3c538c331c5 2024-11-20T04:30:53,467 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/.tmp/hbase.id 2024-11-20T04:30:53,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741826_1002 (size=42) 2024-11-20T04:30:53,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741826_1002 (size=42) 2024-11-20T04:30:53,474 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/.tmp/hbase.id]:[hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/hbase.id] 2024-11-20T04:30:53,484 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:53,484 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T04:30:53,486 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
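
[Editor's note] The FSUtils lines above write the cluster ID (a000b638-838d-4db7-9809-f3c538c331c5) to a temporary hbase.id under .tmp and then move it to its final location. The sketch below shows that write-then-rename pattern with the public Hadoop FileSystem API; it is not HBase's FSUtils code, and the paths are illustrative placeholders. On HDFS the rename is a single metadata operation, so readers never observe a partially written ID file.

    // Minimal sketch of the write-then-rename pattern used for hbase.id above.
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("/hbase/.tmp/hbase.id");   // temporary location
        Path dst = new Path("/hbase/hbase.id");        // final location
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("a000b638-838d-4db7-9809-f3c538c331c5".getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, dst)) {                    // promote the finished file
          throw new IOException("Failed to move " + tmp + " to " + dst);
        }
      }
    }
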
2024-11-20T04:30:53,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:53,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:53,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:30:53,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:30:53,498 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T04:30:53,499 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T04:30:53,500 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:30:53,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741828_1004 (size=1189) 2024-11-20T04:30:53,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741828_1004 (size=1189) 2024-11-20T04:30:53,508 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store 2024-11-20T04:30:53,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:30:53,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:30:53,515 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:30:53,516 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T04:30:53,516 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:30:53,516 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:30:53,516 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T04:30:53,516 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:30:53,516 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
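
[Editor's note] The descriptor dumped above for the local 'master:store' region lists four column families (info, proc, rs, state) with per-family settings. The master builds this region internally via MasterRegion, not through the client API, so the sketch below is only a hedged reconstruction of the first two families using the public TableDescriptorBuilder/ColumnFamilyDescriptorBuilder classes, to make the printed attributes easier to read.

    // Hedged reconstruction of part of the logged descriptor; not how master:store is created.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                     // VERSIONS => '3'
                .setInMemory(true)                                     // IN_MEMORY => 'true'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
                .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
                .setBlocksize(8 * 1024)                                // BLOCKSIZE => 8 KB
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                     // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)                     // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024)                               // BLOCKSIZE => 64 KB
                .build())
            .build();
      }
    }
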
2024-11-20T04:30:53,516 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732077053515Disabling compacts and flushes for region at 1732077053515Disabling writes for close at 1732077053516 (+1 ms)Writing region close event to WAL at 1732077053516Closed at 1732077053516 2024-11-20T04:30:53,517 WARN [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/.initializing 2024-11-20T04:30:53,517 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/WALs/c2a32e16c274,44409,1732077053382 2024-11-20T04:30:53,519 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C44409%2C1732077053382, suffix=, logDir=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/WALs/c2a32e16c274,44409,1732077053382, archiveDir=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/oldWALs, maxLogs=10 2024-11-20T04:30:53,520 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C44409%2C1732077053382.1732077053520 2024-11-20T04:30:53,525 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/WALs/c2a32e16c274,44409,1732077053382/c2a32e16c274%2C44409%2C1732077053382.1732077053520 2024-11-20T04:30:53,525 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39125:39125),(127.0.0.1/127.0.0.1:43897:43897)] 2024-11-20T04:30:53,526 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:30:53,526 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:30:53,526 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:53,526 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:53,528 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:53,529 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T04:30:53,529 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:53,529 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:53,530 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:53,531 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T04:30:53,531 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:53,531 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:30:53,531 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:53,532 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T04:30:53,532 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:53,533 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:30:53,533 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:53,534 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T04:30:53,534 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:53,534 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:30:53,534 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:53,535 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:53,535 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:53,537 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:53,537 DEBUG [master/c2a32e16c274:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:53,537 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T04:30:53,538 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:30:53,540 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:30:53,540 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800560, jitterRate=0.01796509325504303}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T04:30:53,541 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732077053526Initializing all the Stores at 1732077053527 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077053527Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077053527Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077053527Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077053527Cleaning up temporary data from old regions at 1732077053537 (+10 ms)Region opened successfully at 1732077053541 (+4 ms) 2024-11-20T04:30:53,541 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T04:30:53,544 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@646573ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:30:53,545 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T04:30:53,545 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T04:30:53,545 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T04:30:53,545 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T04:30:53,546 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T04:30:53,546 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T04:30:53,546 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T04:30:53,548 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T04:30:53,549 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T04:30:53,550 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T04:30:53,550 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T04:30:53,551 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T04:30:53,553 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T04:30:53,553 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T04:30:53,554 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T04:30:53,555 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T04:30:53,556 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T04:30:53,557 DEBUG 
[master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T04:30:53,558 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T04:30:53,559 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T04:30:53,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T04:30:53,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:53,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T04:30:53,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:53,563 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c2a32e16c274,44409,1732077053382, sessionid=0x10133a450470000, setting cluster-up flag (Was=false) 2024-11-20T04:30:53,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:53,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:53,571 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T04:30:53,572 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2a32e16c274,44409,1732077053382 2024-11-20T04:30:53,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:53,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:53,580 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T04:30:53,581 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2a32e16c274,44409,1732077053382 2024-11-20T04:30:53,582 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T04:30:53,584 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T04:30:53,584 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T04:30:53,584 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T04:30:53,584 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c2a32e16c274,44409,1732077053382 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T04:30:53,585 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:30:53,585 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:30:53,585 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:30:53,585 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:30:53,585 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c2a32e16c274:0, corePoolSize=10, maxPoolSize=10 2024-11-20T04:30:53,585 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:53,585 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:30:53,585 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c2a32e16c274:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T04:30:53,587 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732077083587 2024-11-20T04:30:53,587 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T04:30:53,587 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T04:30:53,587 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T04:30:53,587 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T04:30:53,587 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T04:30:53,588 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T04:30:53,588 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:30:53,588 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:53,588 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T04:30:53,588 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T04:30:53,588 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T04:30:53,588 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T04:30:53,588 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T04:30:53,588 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T04:30:53,589 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732077053589,5,FailOnTimeoutGroup] 2024-11-20T04:30:53,589 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:53,589 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T04:30:53,592 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732077053589,5,FailOnTimeoutGroup] 2024-11-20T04:30:53,592 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:53,592 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T04:30:53,592 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:53,592 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
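
[Editor's note] The hbase:meta descriptor that PEWorker-1 writes above (families info/ns/rep_barrier/table with VERSIONS, ROWCOL bloom filters, ROW_INDEX_V1 block encoding, in-memory 8 KB blocks) is built internally, but the same per-family attributes can be expressed through the public client API. A minimal sketch, assuming an HBase 2.x/3.x client on the classpath; the table name "demo" and the single "info" family are illustrative only and not taken from this test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        public static TableDescriptor build() {
            // Mirrors the attributes logged for the 'info' family of hbase:meta:
            // VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build();
            // "demo" is a hypothetical table name used only for this sketch.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(info)
                .build();
        }
    }

The resulting TableDescriptor would be passed to Admin.createTable(...) in client code; here it only illustrates how the logged attribute names map onto builder calls.
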
2024-11-20T04:30:53,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:30:53,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:30:53,598 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T04:30:53,598 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca 2024-11-20T04:30:53,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741832_1008 (size=32) 2024-11-20T04:30:53,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741832_1008 (size=32) 2024-11-20T04:30:53,605 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:30:53,607 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T04:30:53,608 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T04:30:53,608 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:53,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:53,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T04:30:53,610 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T04:30:53,610 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:53,611 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:53,611 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T04:30:53,612 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T04:30:53,612 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:53,612 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:53,613 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T04:30:53,614 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T04:30:53,614 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:53,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:53,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T04:30:53,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740 2024-11-20T04:30:53,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740 2024-11-20T04:30:53,616 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T04:30:53,616 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T04:30:53,617 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
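
[Editor's note] The CompactionConfiguration line printed for every store above (minFilesToCompact=3, maxFilesToCompact=10, ratio 1.200000, off-peak ratio 5.000000, minCompactSize=128 MB, major period 604800000 with 0.5 jitter) reflects stock hbase-site.xml/hbase-default.xml settings. A hedged sketch of setting the same values programmatically; the key names are my reading of which properties these figures correspond to, so verify them against the shipped hbase-default.xml before relying on them:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period (7 days in ms)
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
            return conf;
        }
    }
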
2024-11-20T04:30:53,618 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T04:30:53,620 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:30:53,620 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792886, jitterRate=0.008206799626350403}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T04:30:53,621 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732077053605Initializing all the Stores at 1732077053606 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077053606Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077053606Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077053606Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077053606Cleaning up temporary data from old regions at 1732077053616 (+10 ms)Region opened successfully at 1732077053621 (+5 ms) 2024-11-20T04:30:53,621 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T04:30:53,621 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T04:30:53,621 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T04:30:53,621 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T04:30:53,621 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T04:30:53,621 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T04:30:53,621 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732077053621Disabling compacts and flushes for region at 1732077053621Disabling writes for close at 1732077053621Writing region 
close event to WAL at 1732077053621Closed at 1732077053621 2024-11-20T04:30:53,622 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:30:53,623 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T04:30:53,623 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T04:30:53,624 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T04:30:53,625 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T04:30:53,645 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(746): ClusterId : a000b638-838d-4db7-9809-f3c538c331c5 2024-11-20T04:30:53,645 DEBUG [RS:0;c2a32e16c274:40417 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T04:30:53,647 DEBUG [RS:0;c2a32e16c274:40417 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T04:30:53,647 DEBUG [RS:0;c2a32e16c274:40417 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T04:30:53,651 DEBUG [RS:0;c2a32e16c274:40417 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T04:30:53,651 DEBUG [RS:0;c2a32e16c274:40417 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f2eeff3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:30:53,663 DEBUG [RS:0;c2a32e16c274:40417 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c2a32e16c274:40417 2024-11-20T04:30:53,663 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T04:30:53,663 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T04:30:53,663 DEBUG [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-20T04:30:53,664 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(2659): reportForDuty to master=c2a32e16c274,44409,1732077053382 with port=40417, startcode=1732077053430 2024-11-20T04:30:53,665 DEBUG [RS:0;c2a32e16c274:40417 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T04:30:53,667 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49355, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T04:30:53,667 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44409 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c2a32e16c274,40417,1732077053430 2024-11-20T04:30:53,667 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44409 {}] master.ServerManager(517): Registering regionserver=c2a32e16c274,40417,1732077053430 2024-11-20T04:30:53,669 DEBUG [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca 2024-11-20T04:30:53,669 DEBUG [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45829 2024-11-20T04:30:53,669 DEBUG [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T04:30:53,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:30:53,672 DEBUG [RS:0;c2a32e16c274:40417 {}] zookeeper.ZKUtil(111): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c2a32e16c274,40417,1732077053430 2024-11-20T04:30:53,672 WARN [RS:0;c2a32e16c274:40417 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T04:30:53,672 INFO [RS:0;c2a32e16c274:40417 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:30:53,672 DEBUG [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430 2024-11-20T04:30:53,672 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c2a32e16c274,40417,1732077053430] 2024-11-20T04:30:53,676 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T04:30:53,677 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T04:30:53,677 INFO [RS:0;c2a32e16c274:40417 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T04:30:53,678 INFO [RS:0;c2a32e16c274:40417 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
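
[Editor's note] The MemStoreFlusher line just above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false) is derived from the region server heap and two configuration fractions. A brief sketch of that arithmetic, assuming the stock defaults (40% of heap for the upper limit, low-water mark at 95% of the upper limit, which matches 880 M and 836 M here); the key names and defaults are my reading of hbase-default.xml rather than anything asserted by this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Fraction of the RS heap usable by all memstores combined (default 0.4 -> the "880 M" above).
            double upper = conf.getDouble("hbase.regionserver.global.memstore.size", 0.4);
            // Low-water mark as a fraction of the upper limit (default 0.95 -> the "836 M" low mark):
            // above it flushes are forced, above the upper limit writes are blocked.
            double lowerRatio = conf.getDouble("hbase.regionserver.global.memstore.size.lower.limit", 0.95);
            long heapBytes = Runtime.getRuntime().maxMemory();
            System.out.printf("global limit ~%.0f MB, low mark ~%.0f MB%n",
                heapBytes * upper / (1 << 20), heapBytes * upper * lowerRatio / (1 << 20));
        }
    }
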
2024-11-20T04:30:53,678 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T04:30:53,678 INFO [RS:0;c2a32e16c274:40417 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T04:30:53,678 INFO [RS:0;c2a32e16c274:40417 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:53,679 DEBUG [RS:0;c2a32e16c274:40417 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:53,679 DEBUG [RS:0;c2a32e16c274:40417 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:53,679 DEBUG [RS:0;c2a32e16c274:40417 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:53,679 DEBUG [RS:0;c2a32e16c274:40417 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:53,679 DEBUG [RS:0;c2a32e16c274:40417 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:53,679 DEBUG [RS:0;c2a32e16c274:40417 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:30:53,679 DEBUG [RS:0;c2a32e16c274:40417 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:53,679 DEBUG [RS:0;c2a32e16c274:40417 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:53,679 DEBUG [RS:0;c2a32e16c274:40417 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:53,679 DEBUG [RS:0;c2a32e16c274:40417 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:53,679 DEBUG [RS:0;c2a32e16c274:40417 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:53,679 DEBUG [RS:0;c2a32e16c274:40417 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:30:53,679 DEBUG [RS:0;c2a32e16c274:40417 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:30:53,679 DEBUG [RS:0;c2a32e16c274:40417 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:30:53,681 INFO [RS:0;c2a32e16c274:40417 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
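
[Editor's note] A few entries above, PressureAwareCompactionThroughputController reports its bounds (higher bound 100.00 MB/s, lower bound 50.00 MB/s, tuning period 60000 ms). The sketch below shows how those bounds could be set explicitly; the property names are my best recollection of the controller's keys and should be treated as assumptions until checked against hbase-default.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Bounds between which the controller tunes compaction I/O; key names assumed, verify before use.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
            conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);               // tuning period in ms
            return conf;
        }
    }
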
2024-11-20T04:30:53,681 INFO [RS:0;c2a32e16c274:40417 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:53,681 INFO [RS:0;c2a32e16c274:40417 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:53,681 INFO [RS:0;c2a32e16c274:40417 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:53,681 INFO [RS:0;c2a32e16c274:40417 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:53,681 INFO [RS:0;c2a32e16c274:40417 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,40417,1732077053430-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T04:30:53,697 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T04:30:53,697 INFO [RS:0;c2a32e16c274:40417 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,40417,1732077053430-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:53,697 INFO [RS:0;c2a32e16c274:40417 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:53,697 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.Replication(171): c2a32e16c274,40417,1732077053430 started 2024-11-20T04:30:53,712 INFO [RS:0;c2a32e16c274:40417 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:53,712 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(1482): Serving as c2a32e16c274,40417,1732077053430, RpcServer on c2a32e16c274/172.17.0.2:40417, sessionid=0x10133a450470001 2024-11-20T04:30:53,712 DEBUG [RS:0;c2a32e16c274:40417 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T04:30:53,712 DEBUG [RS:0;c2a32e16c274:40417 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c2a32e16c274,40417,1732077053430 2024-11-20T04:30:53,712 DEBUG [RS:0;c2a32e16c274:40417 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,40417,1732077053430' 2024-11-20T04:30:53,712 DEBUG [RS:0;c2a32e16c274:40417 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T04:30:53,713 DEBUG [RS:0;c2a32e16c274:40417 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T04:30:53,714 DEBUG [RS:0;c2a32e16c274:40417 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T04:30:53,714 DEBUG [RS:0;c2a32e16c274:40417 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T04:30:53,714 DEBUG [RS:0;c2a32e16c274:40417 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c2a32e16c274,40417,1732077053430 2024-11-20T04:30:53,714 DEBUG [RS:0;c2a32e16c274:40417 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,40417,1732077053430' 2024-11-20T04:30:53,714 DEBUG [RS:0;c2a32e16c274:40417 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T04:30:53,714 DEBUG 
[RS:0;c2a32e16c274:40417 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T04:30:53,714 DEBUG [RS:0;c2a32e16c274:40417 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T04:30:53,714 INFO [RS:0;c2a32e16c274:40417 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T04:30:53,714 INFO [RS:0;c2a32e16c274:40417 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T04:30:53,776 WARN [c2a32e16c274:44409 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-20T04:30:53,816 INFO [RS:0;c2a32e16c274:40417 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C40417%2C1732077053430, suffix=, logDir=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430, archiveDir=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/oldWALs, maxLogs=32 2024-11-20T04:30:53,817 INFO [RS:0;c2a32e16c274:40417 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C40417%2C1732077053430.1732077053817 2024-11-20T04:30:53,823 INFO [RS:0;c2a32e16c274:40417 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430/c2a32e16c274%2C40417%2C1732077053430.1732077053817 2024-11-20T04:30:53,824 DEBUG [RS:0;c2a32e16c274:40417 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43897:43897),(127.0.0.1/127.0.0.1:39125:39125)] 2024-11-20T04:30:54,026 DEBUG [c2a32e16c274:44409 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T04:30:54,027 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c2a32e16c274,40417,1732077053430 2024-11-20T04:30:54,028 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2a32e16c274,40417,1732077053430, state=OPENING 2024-11-20T04:30:54,030 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T04:30:54,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:54,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:30:54,033 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T04:30:54,033 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:30:54,033 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:30:54,033 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c2a32e16c274,40417,1732077053430}] 2024-11-20T04:30:54,186 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T04:30:54,188 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46015, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T04:30:54,192 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T04:30:54,192 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:30:54,194 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C40417%2C1732077053430.meta, suffix=.meta, logDir=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430, archiveDir=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/oldWALs, maxLogs=32 2024-11-20T04:30:54,194 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C40417%2C1732077053430.meta.1732077054194.meta 2024-11-20T04:30:54,199 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430/c2a32e16c274%2C40417%2C1732077053430.meta.1732077054194.meta 2024-11-20T04:30:54,200 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39125:39125),(127.0.0.1/127.0.0.1:43897:43897)] 2024-11-20T04:30:54,201 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:30:54,201 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T04:30:54,201 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T04:30:54,202 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-20T04:30:54,202 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T04:30:54,202 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:30:54,202 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T04:30:54,202 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T04:30:54,203 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T04:30:54,204 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T04:30:54,204 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:54,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:54,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T04:30:54,205 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T04:30:54,205 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:54,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:54,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T04:30:54,206 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T04:30:54,207 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:54,207 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:30:54,207 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T04:30:54,208 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T04:30:54,208 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:54,208 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-20T04:30:54,208 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T04:30:54,209 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740 2024-11-20T04:30:54,210 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740 2024-11-20T04:30:54,211 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T04:30:54,211 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T04:30:54,211 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T04:30:54,212 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T04:30:54,213 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=861042, jitterRate=0.09487210214138031}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T04:30:54,213 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T04:30:54,214 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732077054202Writing region info on filesystem at 1732077054202Initializing all the Stores at 1732077054203 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077054203Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077054203Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077054203Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077054203Cleaning up temporary data from old regions at 1732077054211 (+8 ms)Running coprocessor post-open hooks at 1732077054213 (+2 ms)Region opened successfully at 1732077054214 (+1 ms) 2024-11-20T04:30:54,215 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732077054186 2024-11-20T04:30:54,217 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T04:30:54,218 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T04:30:54,218 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c2a32e16c274,40417,1732077053430 2024-11-20T04:30:54,219 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2a32e16c274,40417,1732077053430, state=OPEN 2024-11-20T04:30:54,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T04:30:54,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T04:30:54,230 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c2a32e16c274,40417,1732077053430 2024-11-20T04:30:54,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:30:54,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:30:54,233 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T04:30:54,233 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c2a32e16c274,40417,1732077053430 in 197 msec 2024-11-20T04:30:54,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T04:30:54,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 610 msec 2024-11-20T04:30:54,236 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:30:54,236 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T04:30:54,238 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T04:30:54,238 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2a32e16c274,40417,1732077053430, seqNum=-1] 2024-11-20T04:30:54,238 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T04:30:54,240 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58335, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T04:30:54,245 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 661 msec 2024-11-20T04:30:54,245 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732077054245, completionTime=-1 2024-11-20T04:30:54,245 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T04:30:54,245 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T04:30:54,247 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T04:30:54,247 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732077114247 2024-11-20T04:30:54,247 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732077174247 2024-11-20T04:30:54,247 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-20T04:30:54,248 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,44409,1732077053382-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:54,248 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,44409,1732077053382-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:54,248 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,44409,1732077053382-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:54,248 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c2a32e16c274:44409, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T04:30:54,248 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:54,248 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T04:30:54,250 DEBUG [master/c2a32e16c274:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T04:30:54,252 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.789sec 2024-11-20T04:30:54,252 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T04:30:54,252 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T04:30:54,252 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T04:30:54,252 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T04:30:54,252 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T04:30:54,252 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,44409,1732077053382-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T04:30:54,252 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,44409,1732077053382-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T04:30:54,254 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T04:30:54,254 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T04:30:54,255 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,44409,1732077053382-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T04:30:54,345 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47810059, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:30:54,345 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c2a32e16c274,44409,-1 for getting cluster id 2024-11-20T04:30:54,345 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T04:30:54,347 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a000b638-838d-4db7-9809-f3c538c331c5' 2024-11-20T04:30:54,348 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T04:30:54,348 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a000b638-838d-4db7-9809-f3c538c331c5" 2024-11-20T04:30:54,348 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@358797fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:30:54,348 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c2a32e16c274,44409,-1] 2024-11-20T04:30:54,349 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T04:30:54,349 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:30:54,350 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52838, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T04:30:54,351 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1af897b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:30:54,351 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T04:30:54,352 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2a32e16c274,40417,1732077053430, seqNum=-1] 2024-11-20T04:30:54,352 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T04:30:54,353 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38082, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T04:30:54,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c2a32e16c274,44409,1732077053382 2024-11-20T04:30:54,355 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:30:54,358 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T04:30:54,358 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T04:30:54,359 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is c2a32e16c274,44409,1732077053382 2024-11-20T04:30:54,359 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@687e85d6 2024-11-20T04:30:54,359 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T04:30:54,360 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52844, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T04:30:54,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T04:30:54,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-20T04:30:54,361 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T04:30:54,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T04:30:54,363 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T04:30:54,364 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:54,364 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-20T04:30:54,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T04:30:54,364 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T04:30:54,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741835_1011 (size=405) 2024-11-20T04:30:54,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741835_1011 (size=405) 2024-11-20T04:30:54,373 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5c53ef8a89a7f4c9fbea6b78b0c51100, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca 2024-11-20T04:30:54,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741836_1012 (size=88) 2024-11-20T04:30:54,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741836_1012 (size=88) 2024-11-20T04:30:54,379 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:30:54,379 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 5c53ef8a89a7f4c9fbea6b78b0c51100, disabling compactions & flushes 2024-11-20T04:30:54,379 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 2024-11-20T04:30:54,379 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 2024-11-20T04:30:54,379 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. after waiting 0 ms 2024-11-20T04:30:54,379 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 
2024-11-20T04:30:54,379 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 2024-11-20T04:30:54,379 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5c53ef8a89a7f4c9fbea6b78b0c51100: Waiting for close lock at 1732077054379Disabling compacts and flushes for region at 1732077054379Disabling writes for close at 1732077054379Writing region close event to WAL at 1732077054379Closed at 1732077054379 2024-11-20T04:30:54,381 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T04:30:54,381 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732077054381"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732077054381"}]},"ts":"1732077054381"} 2024-11-20T04:30:54,383 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-20T04:30:54,384 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T04:30:54,385 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732077054385"}]},"ts":"1732077054385"} 2024-11-20T04:30:54,387 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-20T04:30:54,387 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5c53ef8a89a7f4c9fbea6b78b0c51100, ASSIGN}] 2024-11-20T04:30:54,389 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5c53ef8a89a7f4c9fbea6b78b0c51100, ASSIGN 2024-11-20T04:30:54,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:54,390 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5c53ef8a89a7f4c9fbea6b78b0c51100, ASSIGN; state=OFFLINE, location=c2a32e16c274,40417,1732077053430; forceNewPlan=false, retain=false 2024-11-20T04:30:54,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:54,541 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5c53ef8a89a7f4c9fbea6b78b0c51100, regionState=OPENING, regionLocation=c2a32e16c274,40417,1732077053430 2024-11-20T04:30:54,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5c53ef8a89a7f4c9fbea6b78b0c51100, ASSIGN because future has completed 2024-11-20T04:30:54,544 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c53ef8a89a7f4c9fbea6b78b0c51100, server=c2a32e16c274,40417,1732077053430}] 2024-11-20T04:30:54,700 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 
2024-11-20T04:30:54,701 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5c53ef8a89a7f4c9fbea6b78b0c51100, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:30:54,701 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 5c53ef8a89a7f4c9fbea6b78b0c51100 2024-11-20T04:30:54,701 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:30:54,701 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5c53ef8a89a7f4c9fbea6b78b0c51100 2024-11-20T04:30:54,701 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5c53ef8a89a7f4c9fbea6b78b0c51100 2024-11-20T04:30:54,702 INFO [StoreOpener-5c53ef8a89a7f4c9fbea6b78b0c51100-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5c53ef8a89a7f4c9fbea6b78b0c51100 2024-11-20T04:30:54,704 INFO [StoreOpener-5c53ef8a89a7f4c9fbea6b78b0c51100-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5c53ef8a89a7f4c9fbea6b78b0c51100 columnFamilyName info 2024-11-20T04:30:54,704 DEBUG [StoreOpener-5c53ef8a89a7f4c9fbea6b78b0c51100-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:30:54,704 INFO [StoreOpener-5c53ef8a89a7f4c9fbea6b78b0c51100-1 {}] regionserver.HStore(327): Store=5c53ef8a89a7f4c9fbea6b78b0c51100/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:30:54,704 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5c53ef8a89a7f4c9fbea6b78b0c51100 2024-11-20T04:30:54,705 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100 2024-11-20T04:30:54,705 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100 2024-11-20T04:30:54,706 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5c53ef8a89a7f4c9fbea6b78b0c51100 2024-11-20T04:30:54,706 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5c53ef8a89a7f4c9fbea6b78b0c51100 2024-11-20T04:30:54,707 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5c53ef8a89a7f4c9fbea6b78b0c51100 2024-11-20T04:30:54,709 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:30:54,709 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5c53ef8a89a7f4c9fbea6b78b0c51100; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=816800, jitterRate=0.038616061210632324}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T04:30:54,710 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5c53ef8a89a7f4c9fbea6b78b0c51100 2024-11-20T04:30:54,710 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5c53ef8a89a7f4c9fbea6b78b0c51100: Running coprocessor pre-open hook at 1732077054701Writing region info on filesystem at 1732077054701Initializing all the Stores at 1732077054702 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077054702Cleaning up temporary data from old regions at 1732077054706 (+4 ms)Running coprocessor post-open hooks at 1732077054710 (+4 ms)Region opened successfully at 1732077054710 2024-11-20T04:30:54,711 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100., pid=6, masterSystemTime=1732077054696 2024-11-20T04:30:54,713 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 2024-11-20T04:30:54,714 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 2024-11-20T04:30:54,714 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5c53ef8a89a7f4c9fbea6b78b0c51100, regionState=OPEN, openSeqNum=2, regionLocation=c2a32e16c274,40417,1732077053430 2024-11-20T04:30:54,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c53ef8a89a7f4c9fbea6b78b0c51100, server=c2a32e16c274,40417,1732077053430 because future has completed 2024-11-20T04:30:54,720 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T04:30:54,721 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5c53ef8a89a7f4c9fbea6b78b0c51100, server=c2a32e16c274,40417,1732077053430 in 175 msec 2024-11-20T04:30:54,723 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T04:30:54,723 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5c53ef8a89a7f4c9fbea6b78b0c51100, ASSIGN in 334 msec 2024-11-20T04:30:54,724 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T04:30:54,724 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732077054724"}]},"ts":"1732077054724"} 2024-11-20T04:30:54,727 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-20T04:30:54,728 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T04:30:54,729 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 367 msec 2024-11-20T04:30:55,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:55,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:56,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:56,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:56,725 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T04:30:56,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,747 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,747 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,747 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,752 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:56,754 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:30:57,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:57,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:58,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:58,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:59,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:59,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:30:59,676 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T04:30:59,676 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-20T04:31:00,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:00,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-20T04:31:01,223 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-20T04:31:01,223 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-20T04:31:01,224 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-20T04:31:01,224 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-20T04:31:01,224 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-20T04:31:01,224 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-20T04:31:01,225 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T04:31:01,225 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-20T04:31:01,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:01,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:02,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:02,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:03,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:03,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-20T04:31:04,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-20T04:31:04,371 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-20T04:31:04,371 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-20T04:31:04,374 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T04:31:04,374 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.
2024-11-20T04:31:04,376 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100., hostname=c2a32e16c274,40417,1732077053430, seqNum=2]
2024-11-20T04:31:04,383 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T04:31:04,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T04:31:04,389 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-20T04:31:04,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-20T04:31:04,390 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T04:31:04,391 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T04:31:04,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:04,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-20T04:31:04,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-20T04:31:04,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.
2024-11-20T04:31:04,552 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 5c53ef8a89a7f4c9fbea6b78b0c51100 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-20T04:31:04,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/0e99f700c0454af7994b157b2349b602 is 1080, key is row0001/info:/1732077064377/Put/seqid=0
2024-11-20T04:31:04,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741837_1013 (size=6033)
2024-11-20T04:31:04,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741837_1013 (size=6033)
2024-11-20T04:31:04,575 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/0e99f700c0454af7994b157b2349b602
2024-11-20T04:31:04,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/0e99f700c0454af7994b157b2349b602 as hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/0e99f700c0454af7994b157b2349b602
2024-11-20T04:31:04,586 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/0e99f700c0454af7994b157b2349b602, entries=1, sequenceid=5, filesize=5.9 K
2024-11-20T04:31:04,587 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5c53ef8a89a7f4c9fbea6b78b0c51100 in 35ms, sequenceid=5, compaction requested=false
2024-11-20T04:31:04,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 5c53ef8a89a7f4c9fbea6b78b0c51100:
2024-11-20T04:31:04,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.
2024-11-20T04:31:04,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-20T04:31:04,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-20T04:31:04,594 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-20T04:31:04,594 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec
2024-11-20T04:31:04,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 210 msec
2024-11-20T04:31:05,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:05,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:06,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:06,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:07,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:07,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:08,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:08,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:09,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:09,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:10,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:10,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:11,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:11,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:12,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:12,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:13,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:13,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:14,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:14,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:31:14,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-20T04:31:14,412 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-20T04:31:14,414 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T04:31:14,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T04:31:14,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-20T04:31:14,417 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-20T04:31:14,418 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T04:31:14,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T04:31:14,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-20T04:31:14,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 
2024-11-20T04:31:14,572 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 5c53ef8a89a7f4c9fbea6b78b0c51100 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-20T04:31:14,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/882c7302ccb84cc3b828abae0d516798 is 1080, key is row0002/info:/1732077074413/Put/seqid=0 2024-11-20T04:31:14,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741838_1014 (size=6033) 2024-11-20T04:31:14,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741838_1014 (size=6033) 2024-11-20T04:31:14,581 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/882c7302ccb84cc3b828abae0d516798 2024-11-20T04:31:14,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/882c7302ccb84cc3b828abae0d516798 as hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/882c7302ccb84cc3b828abae0d516798 2024-11-20T04:31:14,591 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/882c7302ccb84cc3b828abae0d516798, entries=1, sequenceid=9, filesize=5.9 K 2024-11-20T04:31:14,593 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5c53ef8a89a7f4c9fbea6b78b0c51100 in 22ms, sequenceid=9, compaction requested=false 2024-11-20T04:31:14,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 5c53ef8a89a7f4c9fbea6b78b0c51100: 2024-11-20T04:31:14,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 
2024-11-20T04:31:14,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-20T04:31:14,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-20T04:31:14,597 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-20T04:31:14,597 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec 2024-11-20T04:31:14,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec 2024-11-20T04:31:15,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:31:15,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:16,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:16,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:31:17,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:17,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:18,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:31:18,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 after 68046ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor193.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T04:31:18,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:18,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta after 68032ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor193.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T04:31:23,366 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-20T04:31:24,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-20T04:31:24,492 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-20T04:31:24,495 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C40417%2C1732077053430.1732077084494 2024-11-20T04:31:24,500 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:24,501 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:24,501 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:24,501 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:24,501 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:24,501 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430/c2a32e16c274%2C40417%2C1732077053430.1732077053817 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430/c2a32e16c274%2C40417%2C1732077053430.1732077084494 2024-11-20T04:31:24,502 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43897:43897),(127.0.0.1/127.0.0.1:39125:39125)] 2024-11-20T04:31:24,502 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430/c2a32e16c274%2C40417%2C1732077053430.1732077053817 is not closed yet, will try archiving it next time 2024-11-20T04:31:24,502 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T04:31:24,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741833_1009 (size=5546) 2024-11-20T04:31:24,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741833_1009 (size=5546) 2024-11-20T04:31:24,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T04:31:24,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-20T04:31:24,505 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-20T04:31:24,506 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T04:31:24,506 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T04:31:24,659 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-20T04:31:24,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 2024-11-20T04:31:24,660 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 5c53ef8a89a7f4c9fbea6b78b0c51100 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-20T04:31:24,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/47d773f4796c49c79c6cccc2151140dc is 1080, key is row0003/info:/1732077084493/Put/seqid=0 2024-11-20T04:31:24,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741840_1016 (size=6033) 2024-11-20T04:31:24,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741840_1016 (size=6033) 2024-11-20T04:31:24,669 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/47d773f4796c49c79c6cccc2151140dc 2024-11-20T04:31:24,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/47d773f4796c49c79c6cccc2151140dc as hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/47d773f4796c49c79c6cccc2151140dc 2024-11-20T04:31:24,680 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/47d773f4796c49c79c6cccc2151140dc, entries=1, sequenceid=13, filesize=5.9 K 2024-11-20T04:31:24,682 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5c53ef8a89a7f4c9fbea6b78b0c51100 in 23ms, sequenceid=13, compaction requested=true 2024-11-20T04:31:24,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 5c53ef8a89a7f4c9fbea6b78b0c51100: 2024-11-20T04:31:24,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 2024-11-20T04:31:24,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-20T04:31:24,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-20T04:31:24,686 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-20T04:31:24,686 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-11-20T04:31:24,688 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-11-20T04:31:25,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 
2024-11-20T04:31:29,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:30,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:30,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:31,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:31,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:32,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:32,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:33,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:33,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:34,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:34,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-20T04:31:34,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-20T04:31:34,612 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-20T04:31:34,612 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T04:31:34,613 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T04:31:34,613 DEBUG [Time-limited test {}] regionserver.HStore(1541): 5c53ef8a89a7f4c9fbea6b78b0c51100/info is initiating minor compaction (all files)
2024-11-20T04:31:34,613 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-20T04:31:34,613 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T04:31:34,614 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 5c53ef8a89a7f4c9fbea6b78b0c51100/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.
2024-11-20T04:31:34,614 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/0e99f700c0454af7994b157b2349b602, hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/882c7302ccb84cc3b828abae0d516798, hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/47d773f4796c49c79c6cccc2151140dc] into tmpdir=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp, totalSize=17.7 K
2024-11-20T04:31:34,614 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 0e99f700c0454af7994b157b2349b602, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732077064377
2024-11-20T04:31:34,614 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 882c7302ccb84cc3b828abae0d516798, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732077074413
2024-11-20T04:31:34,615 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 47d773f4796c49c79c6cccc2151140dc, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732077084493
2024-11-20T04:31:34,626 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 5c53ef8a89a7f4c9fbea6b78b0c51100#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T04:31:34,627 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/0c5fb33d3b124ec79319c8f82b708de6 is 1080, key is row0001/info:/1732077064377/Put/seqid=0
2024-11-20T04:31:34,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741841_1017 (size=8296)
2024-11-20T04:31:34,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741841_1017 (size=8296)
2024-11-20T04:31:34,638 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/0c5fb33d3b124ec79319c8f82b708de6 as hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/0c5fb33d3b124ec79319c8f82b708de6
2024-11-20T04:31:34,644 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5c53ef8a89a7f4c9fbea6b78b0c51100/info of 5c53ef8a89a7f4c9fbea6b78b0c51100 into 0c5fb33d3b124ec79319c8f82b708de6(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T04:31:34,644 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 5c53ef8a89a7f4c9fbea6b78b0c51100:
2024-11-20T04:31:34,647 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C40417%2C1732077053430.1732077094647
2024-11-20T04:31:34,652 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:31:34,652 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:31:34,653 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:31:34,653 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:31:34,653 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-20T04:31:34,653 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430/c2a32e16c274%2C40417%2C1732077053430.1732077084494 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430/c2a32e16c274%2C40417%2C1732077053430.1732077094647
2024-11-20T04:31:34,653 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39125:39125),(127.0.0.1/127.0.0.1:43897:43897)]
2024-11-20T04:31:34,654 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430/c2a32e16c274%2C40417%2C1732077053430.1732077084494 is not closed yet, will try archiving it next time
2024-11-20T04:31:34,654 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430/c2a32e16c274%2C40417%2C1732077053430.1732077053817 to hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/oldWALs/c2a32e16c274%2C40417%2C1732077053430.1732077053817
2024-11-20T04:31:34,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741839_1015 (size=2520)
2024-11-20T04:31:34,655 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T04:31:34,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741839_1015 (size=2520)
2024-11-20T04:31:34,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-20T04:31:34,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-20T04:31:34,657 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-20T04:31:34,658 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T04:31:34,658 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T04:31:34,811 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-20T04:31:34,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.
2024-11-20T04:31:34,811 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 5c53ef8a89a7f4c9fbea6b78b0c51100 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-20T04:31:34,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/2c38bb7d2ee34e42927c1ad61a30db06 is 1080, key is row0000/info:/1732077094645/Put/seqid=0
2024-11-20T04:31:34,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741843_1019 (size=6033)
2024-11-20T04:31:34,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741843_1019 (size=6033)
2024-11-20T04:31:34,821 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/2c38bb7d2ee34e42927c1ad61a30db06
2024-11-20T04:31:34,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/2c38bb7d2ee34e42927c1ad61a30db06 as hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/2c38bb7d2ee34e42927c1ad61a30db06
2024-11-20T04:31:34,830 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/2c38bb7d2ee34e42927c1ad61a30db06, entries=1, sequenceid=18, filesize=5.9 K
2024-11-20T04:31:34,831 INFO [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5c53ef8a89a7f4c9fbea6b78b0c51100 in 20ms, sequenceid=18, compaction requested=false
2024-11-20T04:31:34,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 5c53ef8a89a7f4c9fbea6b78b0c51100:
2024-11-20T04:31:34,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.
2024-11-20T04:31:34,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-20T04:31:34,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-20T04:31:34,836 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-20T04:31:34,836 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 175 msec
2024-11-20T04:31:34,839 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec
2024-11-20T04:31:34,889 INFO [master/c2a32e16c274:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-20T04:31:34,889 INFO [master/c2a32e16c274:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-20T04:31:35,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-20T04:31:35,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:36,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:36,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:31:37,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:37,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:38,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:31:38,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:39,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:39,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:31:39,701 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5c53ef8a89a7f4c9fbea6b78b0c51100, had cached 0 bytes from a total of 14329 2024-11-20T04:31:40,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:40,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:41,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:41,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:42,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:42,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:43,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:43,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:44,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:44,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:31:44,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44409 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-20T04:31:44,751 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-20T04:31:44,754 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C40417%2C1732077053430.1732077104754 2024-11-20T04:31:44,760 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,760 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,760 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,760 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,760 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,760 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430/c2a32e16c274%2C40417%2C1732077053430.1732077094647 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430/c2a32e16c274%2C40417%2C1732077053430.1732077104754 2024-11-20T04:31:44,761 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43897:43897),(127.0.0.1/127.0.0.1:39125:39125)] 2024-11-20T04:31:44,761 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430/c2a32e16c274%2C40417%2C1732077053430.1732077094647 is not closed yet, will try archiving it next time 2024-11-20T04:31:44,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T04:31:44,761 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/WALs/c2a32e16c274,40417,1732077053430/c2a32e16c274%2C40417%2C1732077053430.1732077084494 to hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/oldWALs/c2a32e16c274%2C40417%2C1732077053430.1732077084494 2024-11-20T04:31:44,761 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
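The block of warnings above is RecoverLeaseFSUtils polling DFSClient.isFileClosed roughly once per second for two WAL files after the test's DFS client has already been shut down, so every probe fails with IOException("Filesystem closed"); HBase invokes isFileClosed reflectively, which is why each trace wraps the IOException in an InvocationTargetException. A minimal sketch of that polling pattern against the public DistributedFileSystem API follows; the class, method, and variable names (LeaseRecoverySketch, recover, dfs, walPath) are placeholders for illustration, not code from HBase or from this test.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Approximates the recoverLease/isFileClosed loop behind the warnings above.
      public static void recover(DistributedFileSystem dfs, Path walPath) throws Exception {
        // recoverLease() returns true once the NameNode considers the file closed.
        boolean closed = dfs.recoverLease(walPath);
        while (!closed) {
          Thread.sleep(1000L); // the timestamps above show roughly 1 s between probes
          // isFileClosed() throws IOException("Filesystem closed") when the
          // underlying DFSClient has already been shut down, which is the
          // repeated failure logged by util.RecoverLeaseFSUtils(258).
          closed = dfs.isFileClosed(walPath);
        }
      }
    }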
2024-11-20T04:31:44,761 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:31:44,761 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:31:44,761 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:31:44,762 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
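The call stack logged above shows the shutdown being driven from AbstractTestLogRolling.tearDown through HBaseTestingUtil.shutdownMiniCluster. A sketch of that tearDown shape is below; the TEST_UTIL field name and the standalone class are assumptions for illustration, not the test's actual source.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class LogRollingTearDownSketch {
      // Assumed shared test utility; the real test wires this up during setup.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the cluster connection, then stops master and region servers,
        // producing the "Shutting down minicluster" sequence in these logs.
        TEST_UTIL.shutdownMiniCluster();
      }
    }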
2024-11-20T04:31:44,762 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T04:31:44,762 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1055589973, stopped=false 2024-11-20T04:31:44,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741842_1018 (size=2026) 2024-11-20T04:31:44,762 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c2a32e16c274,44409,1732077053382 2024-11-20T04:31:44,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741842_1018 (size=2026) 2024-11-20T04:31:44,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:31:44,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:31:44,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:44,764 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T04:31:44,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:44,764 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
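The ZKWatcher lines above report a NodeDeleted event for /hbase/running on quorum 127.0.0.1:63088; deletion of that znode is the cluster-shutdown signal the master and region server react to. A bare ZooKeeper-client sketch of watching that znode follows; it is illustrative only and is not how HBase's ZKWatcher is implemented internally.

    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Quorum string copied from the log above; the session timeout is arbitrary.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:63088", 30000, event -> { });
        Watcher watcher = event -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && "/hbase/running".equals(event.getPath())) {
            System.out.println("cluster shutdown requested");
          }
        };
        zk.exists("/hbase/running", watcher); // one-shot watch on the znode
        // A real caller would keep the process alive to receive the event.
      }
    }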
2024-11-20T04:31:44,764 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:31:44,764 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:31:44,764 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'c2a32e16c274,40417,1732077053430' ***** 2024-11-20T04:31:44,764 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T04:31:44,764 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:31:44,765 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:31:44,765 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(878): Closing user regions 2024-11-20T04:31:44,765 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(3091): Received CLOSE for 5c53ef8a89a7f4c9fbea6b78b0c51100 2024-11-20T04:31:44,765 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5c53ef8a89a7f4c9fbea6b78b0c51100, disabling compactions & flushes 2024-11-20T04:31:44,765 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 2024-11-20T04:31:44,765 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 2024-11-20T04:31:44,765 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. after waiting 0 ms 2024-11-20T04:31:44,765 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 2024-11-20T04:31:44,765 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 5c53ef8a89a7f4c9fbea6b78b0c51100 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-20T04:31:44,766 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T04:31:44,766 INFO [RS:0;c2a32e16c274:40417 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T04:31:44,766 INFO [RS:0;c2a32e16c274:40417 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T04:31:44,766 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T04:31:44,766 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(959): stopping server c2a32e16c274,40417,1732077053430 2024-11-20T04:31:44,766 INFO [RS:0;c2a32e16c274:40417 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:31:44,766 INFO [RS:0;c2a32e16c274:40417 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c2a32e16c274:40417. 
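Earlier in this section the client log reports "Operation: FLUSH ... completed" for TestLogRolling-testCompactionRecordDoesntBlockRolling, and the region close above runs the same flush path ("Flushing 5c53ef8a89a7f4c9fbea6b78b0c51100 1/1 column families"). A hedged sketch of issuing that admin flush follows; the configuration and connection setup are generic example code, not the test's actual wiring.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the region server to flush the table's memstores to HFiles;
          // the server side then logs the "Flushing ... column families" lines.
          admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
        }
      }
    }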
2024-11-20T04:31:44,766 DEBUG [RS:0;c2a32e16c274:40417 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:31:44,766 DEBUG [RS:0;c2a32e16c274:40417 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:31:44,766 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T04:31:44,766 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T04:31:44,767 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
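The stack above shows HBaseServerBase.closeClusterConnection calling AsyncConnectionImpl.close, which also emits the "Stopping rpc client" debug lines. For reference, a minimal client-side sketch of opening and closing an AsyncConnection is below; it is generic example code, not the region server's internal path.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncConnectionCloseSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // createAsyncConnection returns a CompletableFuture<AsyncConnection>.
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          // ... use conn.getTable(...), conn.getAdmin(), etc. ...
        } // close() shuts down the underlying rpc client
      }
    }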
2024-11-20T04:31:44,767 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T04:31:44,767 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-20T04:31:44,767 DEBUG [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 5c53ef8a89a7f4c9fbea6b78b0c51100=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.} 2024-11-20T04:31:44,767 DEBUG [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 5c53ef8a89a7f4c9fbea6b78b0c51100 2024-11-20T04:31:44,767 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T04:31:44,767 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T04:31:44,767 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T04:31:44,767 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T04:31:44,767 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T04:31:44,767 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-20T04:31:44,769 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/936893c6ac4041f491cb64038fe61832 is 1080, key is row0001/info:/1732077104753/Put/seqid=0 2024-11-20T04:31:44,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741845_1021 (size=6033) 2024-11-20T04:31:44,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741845_1021 (size=6033) 2024-11-20T04:31:44,777 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/936893c6ac4041f491cb64038fe61832 2024-11-20T04:31:44,782 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/.tmp/info/936893c6ac4041f491cb64038fe61832 as 
hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/936893c6ac4041f491cb64038fe61832 2024-11-20T04:31:44,783 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/.tmp/info/111ded9ef1ea4c749d39bc63085f307f is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100./info:regioninfo/1732077054714/Put/seqid=0 2024-11-20T04:31:44,787 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/936893c6ac4041f491cb64038fe61832, entries=1, sequenceid=22, filesize=5.9 K 2024-11-20T04:31:44,788 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5c53ef8a89a7f4c9fbea6b78b0c51100 in 23ms, sequenceid=22, compaction requested=true 2024-11-20T04:31:44,789 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/0e99f700c0454af7994b157b2349b602, hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/882c7302ccb84cc3b828abae0d516798, hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/47d773f4796c49c79c6cccc2151140dc] to archive 2024-11-20T04:31:44,789 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
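The HRegionFileSystem(442) line above commits the flushed HFile by moving it from the region's .tmp directory into the info store directory. That "write to .tmp, then rename into place" pattern can be sketched with the plain FileSystem API; the class and parameter names below are placeholders, not HBase internals.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitFlushFileSketch {
      public static void commit(FileSystem fs, Path tmpFile, Path finalFile) throws IOException {
        // Writing under .tmp and renaming keeps readers from ever seeing a
        // partially written HFile; rename is atomic within the HDFS namespace.
        if (!fs.rename(tmpFile, finalFile)) {
          throw new IOException("Failed to commit " + tmpFile + " to " + finalFile);
        }
      }
    }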
2024-11-20T04:31:44,791 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/0e99f700c0454af7994b157b2349b602 to hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/0e99f700c0454af7994b157b2349b602 2024-11-20T04:31:44,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741846_1022 (size=7308) 2024-11-20T04:31:44,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741846_1022 (size=7308) 2024-11-20T04:31:44,793 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/.tmp/info/111ded9ef1ea4c749d39bc63085f307f 2024-11-20T04:31:44,793 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/882c7302ccb84cc3b828abae0d516798 to hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/882c7302ccb84cc3b828abae0d516798 2024-11-20T04:31:44,794 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/47d773f4796c49c79c6cccc2151140dc to hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/info/47d773f4796c49c79c6cccc2151140dc 2024-11-20T04:31:44,794 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c2a32e16c274:44409 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] 
at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-20T04:31:44,794 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [0e99f700c0454af7994b157b2349b602=6033, 882c7302ccb84cc3b828abae0d516798=6033, 47d773f4796c49c79c6cccc2151140dc=6033] 2024-11-20T04:31:44,798 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5c53ef8a89a7f4c9fbea6b78b0c51100/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-20T04:31:44,799 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 2024-11-20T04:31:44,799 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5c53ef8a89a7f4c9fbea6b78b0c51100: Waiting for close lock at 1732077104765Running coprocessor pre-close hooks at 1732077104765Disabling compacts and flushes for region at 1732077104765Disabling writes for close at 1732077104765Obtaining lock to block concurrent updates at 1732077104765Preparing flush snapshotting stores in 5c53ef8a89a7f4c9fbea6b78b0c51100 at 1732077104765Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732077104765Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. at 1732077104766 (+1 ms)Flushing 5c53ef8a89a7f4c9fbea6b78b0c51100/info: creating writer at 1732077104766Flushing 5c53ef8a89a7f4c9fbea6b78b0c51100/info: appending metadata at 1732077104769 (+3 ms)Flushing 5c53ef8a89a7f4c9fbea6b78b0c51100/info: closing flushed file at 1732077104769Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33d09e1c: reopening flushed file at 1732077104782 (+13 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5c53ef8a89a7f4c9fbea6b78b0c51100 in 23ms, sequenceid=22, compaction requested=true at 1732077104788 (+6 ms)Writing region close event to WAL at 1732077104795 (+7 ms)Running coprocessor post-close hooks at 1732077104799 (+4 ms)Closed at 1732077104799 2024-11-20T04:31:44,799 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732077054361.5c53ef8a89a7f4c9fbea6b78b0c51100. 
2024-11-20T04:31:44,812 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/.tmp/ns/3b124e5ad078455682af3fed3ee59fc8 is 43, key is default/ns:d/1732077054240/Put/seqid=0 2024-11-20T04:31:44,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741847_1023 (size=5153) 2024-11-20T04:31:44,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741847_1023 (size=5153) 2024-11-20T04:31:44,817 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/.tmp/ns/3b124e5ad078455682af3fed3ee59fc8 2024-11-20T04:31:44,836 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/.tmp/table/21be599ca035487685602cb55873fd82 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732077054724/Put/seqid=0 2024-11-20T04:31:44,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741848_1024 (size=5508) 2024-11-20T04:31:44,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741848_1024 (size=5508) 2024-11-20T04:31:44,841 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/.tmp/table/21be599ca035487685602cb55873fd82 2024-11-20T04:31:44,846 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/.tmp/info/111ded9ef1ea4c749d39bc63085f307f as hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/info/111ded9ef1ea4c749d39bc63085f307f 2024-11-20T04:31:44,850 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/info/111ded9ef1ea4c749d39bc63085f307f, entries=10, sequenceid=11, filesize=7.1 K 2024-11-20T04:31:44,851 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/.tmp/ns/3b124e5ad078455682af3fed3ee59fc8 as hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/ns/3b124e5ad078455682af3fed3ee59fc8 2024-11-20T04:31:44,856 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/ns/3b124e5ad078455682af3fed3ee59fc8, entries=2, sequenceid=11, filesize=5.0 K 2024-11-20T04:31:44,857 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/.tmp/table/21be599ca035487685602cb55873fd82 as hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/table/21be599ca035487685602cb55873fd82 2024-11-20T04:31:44,861 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/table/21be599ca035487685602cb55873fd82, entries=2, sequenceid=11, filesize=5.4 K 2024-11-20T04:31:44,862 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 95ms, sequenceid=11, compaction requested=false 2024-11-20T04:31:44,867 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-20T04:31:44,867 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T04:31:44,867 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T04:31:44,867 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732077104767Running coprocessor pre-close hooks at 1732077104767Disabling compacts and flushes for region at 1732077104767Disabling writes for close at 1732077104767Obtaining lock to block concurrent updates at 1732077104767Preparing flush snapshotting stores in 1588230740 at 1732077104767Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732077104767Flushing stores of hbase:meta,,1.1588230740 at 1732077104768 (+1 ms)Flushing 1588230740/info: creating writer at 1732077104768Flushing 1588230740/info: appending metadata at 1732077104783 (+15 ms)Flushing 1588230740/info: closing flushed file at 1732077104783Flushing 1588230740/ns: creating writer at 1732077104798 (+15 ms)Flushing 1588230740/ns: appending metadata at 1732077104811 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1732077104811Flushing 1588230740/table: creating writer at 1732077104822 (+11 ms)Flushing 1588230740/table: appending metadata at 1732077104835 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732077104836 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42d6180e: reopening flushed file at 1732077104845 (+9 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4aafa6d0: reopening flushed file at 1732077104850 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d0a764c: reopening flushed file at 1732077104856 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 95ms, sequenceid=11, compaction requested=false at 1732077104862 (+6 ms)Writing region close event to WAL at 1732077104864 (+2 ms)Running coprocessor post-close hooks at 1732077104867 (+3 ms)Closed at 1732077104867 2024-11-20T04:31:44,867 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T04:31:44,967 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(976): stopping server c2a32e16c274,40417,1732077053430; all regions closed. 2024-11-20T04:31:44,967 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,968 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,968 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,968 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,968 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741834_1010 (size=3306) 2024-11-20T04:31:44,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741834_1010 (size=3306) 2024-11-20T04:31:44,972 DEBUG [RS:0;c2a32e16c274:40417 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/oldWALs 2024-11-20T04:31:44,972 INFO [RS:0;c2a32e16c274:40417 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C40417%2C1732077053430.meta:.meta(num 1732077054194) 2024-11-20T04:31:44,973 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,973 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,973 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,973 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,973 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:44,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741844_1020 (size=1252) 2024-11-20T04:31:44,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741844_1020 (size=1252) 2024-11-20T04:31:44,977 DEBUG [RS:0;c2a32e16c274:40417 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/oldWALs 2024-11-20T04:31:44,977 INFO [RS:0;c2a32e16c274:40417 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C40417%2C1732077053430:(num 1732077104754) 2024-11-20T04:31:44,977 DEBUG [RS:0;c2a32e16c274:40417 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:31:44,977 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:31:44,977 INFO [RS:0;c2a32e16c274:40417 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:31:44,977 INFO [RS:0;c2a32e16c274:40417 {}] hbase.ChoreService(370): Chore service for: regionserver/c2a32e16c274:0 had 
[ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-20T04:31:44,978 INFO [RS:0;c2a32e16c274:40417 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:31:44,978 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T04:31:44,978 INFO [RS:0;c2a32e16c274:40417 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40417 2024-11-20T04:31:44,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c2a32e16c274,40417,1732077053430 2024-11-20T04:31:44,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:31:44,980 INFO [RS:0;c2a32e16c274:40417 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T04:31:44,981 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c2a32e16c274,40417,1732077053430] 2024-11-20T04:31:44,984 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c2a32e16c274,40417,1732077053430 already deleted, retry=false 2024-11-20T04:31:44,984 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c2a32e16c274,40417,1732077053430 expired; onlineServers=0 2024-11-20T04:31:44,984 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c2a32e16c274,44409,1732077053382' ***** 2024-11-20T04:31:44,984 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T04:31:44,984 INFO [M:0;c2a32e16c274:44409 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:31:44,984 INFO [M:0;c2a32e16c274:44409 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:31:44,984 DEBUG [M:0;c2a32e16c274:44409 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T04:31:44,984 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T04:31:44,984 DEBUG [M:0;c2a32e16c274:44409 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T04:31:44,984 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732077053589 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732077053589,5,FailOnTimeoutGroup] 2024-11-20T04:31:44,984 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732077053589 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732077053589,5,FailOnTimeoutGroup] 2024-11-20T04:31:44,985 INFO [M:0;c2a32e16c274:44409 {}] hbase.ChoreService(370): Chore service for: master/c2a32e16c274:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T04:31:44,985 INFO [M:0;c2a32e16c274:44409 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:31:44,985 DEBUG [M:0;c2a32e16c274:44409 {}] master.HMaster(1795): Stopping service threads 2024-11-20T04:31:44,985 INFO [M:0;c2a32e16c274:44409 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T04:31:44,985 INFO [M:0;c2a32e16c274:44409 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T04:31:44,985 INFO [M:0;c2a32e16c274:44409 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T04:31:44,985 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T04:31:44,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T04:31:44,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:44,986 DEBUG [M:0;c2a32e16c274:44409 {}] zookeeper.ZKUtil(347): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T04:31:44,986 WARN [M:0;c2a32e16c274:44409 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T04:31:44,986 INFO [M:0;c2a32e16c274:44409 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/.lastflushedseqids 2024-11-20T04:31:44,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741849_1025 (size=130) 2024-11-20T04:31:44,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741849_1025 (size=130) 2024-11-20T04:31:44,991 INFO [M:0;c2a32e16c274:44409 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T04:31:44,991 INFO [M:0;c2a32e16c274:44409 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T04:31:44,991 DEBUG [M:0;c2a32e16c274:44409 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T04:31:44,992 INFO [M:0;c2a32e16c274:44409 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:31:44,992 DEBUG [M:0;c2a32e16c274:44409 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:31:44,992 DEBUG [M:0;c2a32e16c274:44409 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T04:31:44,992 DEBUG [M:0;c2a32e16c274:44409 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:31:44,992 INFO [M:0;c2a32e16c274:44409 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-11-20T04:31:45,007 DEBUG [M:0;c2a32e16c274:44409 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/38221c6aec0041fda5310c7f8d2390ad is 82, key is hbase:meta,,1/info:regioninfo/1732077054218/Put/seqid=0 2024-11-20T04:31:45,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741850_1026 (size=5672) 2024-11-20T04:31:45,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741850_1026 (size=5672) 2024-11-20T04:31:45,011 INFO [M:0;c2a32e16c274:44409 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/38221c6aec0041fda5310c7f8d2390ad 2024-11-20T04:31:45,030 DEBUG [M:0;c2a32e16c274:44409 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/23306bd2cbc9450db0563e86714a3ad7 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732077054729/Put/seqid=0 2024-11-20T04:31:45,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741851_1027 (size=7823) 2024-11-20T04:31:45,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741851_1027 (size=7823) 2024-11-20T04:31:45,035 INFO [M:0;c2a32e16c274:44409 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/23306bd2cbc9450db0563e86714a3ad7 2024-11-20T04:31:45,039 INFO [M:0;c2a32e16c274:44409 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 23306bd2cbc9450db0563e86714a3ad7 2024-11-20T04:31:45,052 DEBUG [M:0;c2a32e16c274:44409 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7e42abb7cd654de39f356e51b9e2c9c6 is 69, key is c2a32e16c274,40417,1732077053430/rs:state/1732077053668/Put/seqid=0 2024-11-20T04:31:45,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741852_1028 (size=5156) 2024-11-20T04:31:45,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741852_1028 (size=5156) 2024-11-20T04:31:45,057 INFO [M:0;c2a32e16c274:44409 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7e42abb7cd654de39f356e51b9e2c9c6 2024-11-20T04:31:45,075 DEBUG [M:0;c2a32e16c274:44409 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e33f938ef8104e1f9e8261f87150e100 is 52, key is load_balancer_on/state:d/1732077054357/Put/seqid=0 2024-11-20T04:31:45,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741853_1029 (size=5056) 2024-11-20T04:31:45,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741853_1029 (size=5056) 2024-11-20T04:31:45,080 INFO [M:0;c2a32e16c274:44409 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e33f938ef8104e1f9e8261f87150e100 2024-11-20T04:31:45,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:31:45,081 INFO [RS:0;c2a32e16c274:40417 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T04:31:45,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40417-0x10133a450470001, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:31:45,082 INFO [RS:0;c2a32e16c274:40417 {}] regionserver.HRegionServer(1031): Exiting; stopping=c2a32e16c274,40417,1732077053430; zookeeper connection closed. 
2024-11-20T04:31:45,082 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3746510b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3746510b 2024-11-20T04:31:45,082 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T04:31:45,084 DEBUG [M:0;c2a32e16c274:44409 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/38221c6aec0041fda5310c7f8d2390ad as hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/38221c6aec0041fda5310c7f8d2390ad 2024-11-20T04:31:45,088 INFO [M:0;c2a32e16c274:44409 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/38221c6aec0041fda5310c7f8d2390ad, entries=8, sequenceid=121, filesize=5.5 K 2024-11-20T04:31:45,089 DEBUG [M:0;c2a32e16c274:44409 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/23306bd2cbc9450db0563e86714a3ad7 as hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/23306bd2cbc9450db0563e86714a3ad7 2024-11-20T04:31:45,093 INFO [M:0;c2a32e16c274:44409 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 23306bd2cbc9450db0563e86714a3ad7 2024-11-20T04:31:45,093 INFO [M:0;c2a32e16c274:44409 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/23306bd2cbc9450db0563e86714a3ad7, entries=14, sequenceid=121, filesize=7.6 K 2024-11-20T04:31:45,094 DEBUG [M:0;c2a32e16c274:44409 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7e42abb7cd654de39f356e51b9e2c9c6 as hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7e42abb7cd654de39f356e51b9e2c9c6 2024-11-20T04:31:45,098 INFO [M:0;c2a32e16c274:44409 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7e42abb7cd654de39f356e51b9e2c9c6, entries=1, sequenceid=121, filesize=5.0 K 2024-11-20T04:31:45,098 DEBUG [M:0;c2a32e16c274:44409 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e33f938ef8104e1f9e8261f87150e100 as hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e33f938ef8104e1f9e8261f87150e100 2024-11-20T04:31:45,102 INFO [M:0;c2a32e16c274:44409 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45829/user/jenkins/test-data/d13d3b2b-442d-22e8-8b6e-bbfcbced11ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e33f938ef8104e1f9e8261f87150e100, entries=1, sequenceid=121, filesize=4.9 K 2024-11-20T04:31:45,103 INFO [M:0;c2a32e16c274:44409 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=121, compaction requested=false 2024-11-20T04:31:45,105 INFO [M:0;c2a32e16c274:44409 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:31:45,105 DEBUG [M:0;c2a32e16c274:44409 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732077104991Disabling compacts and flushes for region at 1732077104991Disabling writes for close at 1732077104992 (+1 ms)Obtaining lock to block concurrent updates at 1732077104992Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732077104992Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44638, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1732077104992Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732077104993 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732077104993Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732077105006 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732077105007 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732077105015 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732077105029 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732077105029Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732077105039 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732077105052 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732077105052Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732077105061 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732077105075 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732077105075Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@70820ea7: reopening flushed file at 1732077105084 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30c195a7: reopening flushed file at 1732077105089 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e879a67: reopening flushed file at 1732077105093 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73a7c161: reopening flushed file at 1732077105098 (+5 ms)Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=121, compaction requested=false at 1732077105103 (+5 ms)Writing region close event to WAL at 1732077105105 (+2 ms)Closed at 1732077105105 2024-11-20T04:31:45,106 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:45,106 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:45,106 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:45,106 INFO 
[sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:45,106 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:31:45,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44143 is added to blk_1073741830_1006 (size=53035) 2024-11-20T04:31:45,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45793 is added to blk_1073741830_1006 (size=53035) 2024-11-20T04:31:45,109 INFO [M:0;c2a32e16c274:44409 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T04:31:45,109 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T04:31:45,109 INFO [M:0;c2a32e16c274:44409 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44409 2024-11-20T04:31:45,110 INFO [M:0;c2a32e16c274:44409 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T04:31:45,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:31:45,211 INFO [M:0;c2a32e16c274:44409 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T04:31:45,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44409-0x10133a450470000, quorum=127.0.0.1:63088, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:31:45,214 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b423220{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:31:45,214 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5aa33ca4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:31:45,214 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:31:45,214 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5127cbf0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:31:45,214 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c13156e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/hadoop.log.dir/,STOPPED} 2024-11-20T04:31:45,216 WARN [BP-1214701570-172.17.0.2-1732077052710 heartbeating to localhost/127.0.0.1:45829 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:31:45,216 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T04:31:45,216 WARN [BP-1214701570-172.17.0.2-1732077052710 heartbeating to localhost/127.0.0.1:45829 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1214701570-172.17.0.2-1732077052710 (Datanode Uuid 70bd8e52-49d4-4a85-9af8-03d310d41b72) service to localhost/127.0.0.1:45829 2024-11-20T04:31:45,216 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:31:45,217 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/cluster_277009b9-144d-c8e4-dc37-d1137d962448/data/data3/current/BP-1214701570-172.17.0.2-1732077052710 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:31:45,217 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/cluster_277009b9-144d-c8e4-dc37-d1137d962448/data/data4/current/BP-1214701570-172.17.0.2-1732077052710 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:31:45,217 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:31:45,219 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c41fb6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:31:45,219 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@206f042f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:31:45,219 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:31:45,220 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fc981fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:31:45,220 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@719add8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/hadoop.log.dir/,STOPPED} 2024-11-20T04:31:45,221 WARN [BP-1214701570-172.17.0.2-1732077052710 heartbeating to localhost/127.0.0.1:45829 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:31:45,221 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T04:31:45,221 WARN [BP-1214701570-172.17.0.2-1732077052710 heartbeating to localhost/127.0.0.1:45829 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1214701570-172.17.0.2-1732077052710 (Datanode Uuid 751008ab-b53c-44c4-bc5a-0240c12ebb21) service to localhost/127.0.0.1:45829 2024-11-20T04:31:45,221 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:31:45,222 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/cluster_277009b9-144d-c8e4-dc37-d1137d962448/data/data1/current/BP-1214701570-172.17.0.2-1732077052710 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:31:45,222 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/cluster_277009b9-144d-c8e4-dc37-d1137d962448/data/data2/current/BP-1214701570-172.17.0.2-1732077052710 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:31:45,222 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:31:45,228 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1fc56883{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T04:31:45,229 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7934466e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:31:45,229 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:31:45,229 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38f5461{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:31:45,229 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b5b6aa1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/hadoop.log.dir/,STOPPED} 2024-11-20T04:31:45,235 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T04:31:45,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T04:31:45,259 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 179) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45829 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45829 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:45829 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45829 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45829 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/c2a32e16c274:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45829 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45829 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45829 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45829 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=57 (was 132), ProcessCount=11 (was 11), AvailableMemoryMB=7124 (was 7175) 2024-11-20T04:31:45,266 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=57, ProcessCount=11, AvailableMemoryMB=7124 2024-11-20T04:31:45,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T04:31:45,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/hadoop.log.dir so I do NOT create it in target/test-data/710fa355-84f4-8096-d240-df0283177c0b 2024-11-20T04:31:45,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/12aade18-4c3c-4484-382a-0d65f1f68333/hadoop.tmp.dir so I do NOT create it in target/test-data/710fa355-84f4-8096-d240-df0283177c0b 2024-11-20T04:31:45,267 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/cluster_68750bbf-b3ca-58c0-c5df-ac7d54614ae7, deleteOnExit=true 2024-11-20T04:31:45,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T04:31:45,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/test.cache.data in system properties and HBase conf 2024-11-20T04:31:45,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T04:31:45,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/hadoop.log.dir in system properties and HBase conf 2024-11-20T04:31:45,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T04:31:45,267 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T04:31:45,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T04:31:45,267 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-20T04:31:45,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T04:31:45,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T04:31:45,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T04:31:45,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T04:31:45,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T04:31:45,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T04:31:45,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T04:31:45,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T04:31:45,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 
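Note: the entries above show hbase.ResourceChecker tallying live threads before regionserver.wal.TestLogRolling#testLogRolling and HBaseTestingUtil bringing up a mini cluster (1 master, 1 region server, 2 data nodes, 1 ZooKeeper server) while redirecting the Hadoop/HBase scratch directories under target/test-data. A minimal sketch of how a test can request the same topology follows; it assumes the HBase 3 HBaseTestingUtil and StartMiniClusterOption builder API implied by the logged option string and is not taken from this test's source.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Test utility; it creates the test-data directories and wires
    // hadoop.log.dir / hadoop.tmp.dir into the HBase conf, as logged above.
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Same topology as the logged StartMiniClusterOption:
    // 1 master, 1 region server, 2 data nodes, 1 ZooKeeper server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);   // starts DFS, ZooKeeper, master and region server
    try {
      // ... run test logic against the mini cluster ...
    } finally {
      util.shutdownMiniCluster();    // releases the threads the ResourceChecker counts
    }
  }
}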
2024-11-20T04:31:45,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/nfs.dump.dir in system properties and HBase conf 2024-11-20T04:31:45,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/java.io.tmpdir in system properties and HBase conf 2024-11-20T04:31:45,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T04:31:45,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T04:31:45,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T04:31:45,281 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T04:31:45,342 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:31:45,345 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:31:45,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:31:45,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:31:45,346 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:31:45,347 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:31:45,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33382c80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:31:45,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5327e2a9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:31:45,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:45,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:45,462 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1520cb76{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/java.io.tmpdir/jetty-localhost-45421-hadoop-hdfs-3_4_1-tests_jar-_-any-13224968406055096111/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T04:31:45,462 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@65b546b1{HTTP/1.1, (http/1.1)}{localhost:45421} 2024-11-20T04:31:45,462 INFO [Time-limited test {}] server.Server(415): Started @239541ms 2024-11-20T04:31:45,475 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T04:31:45,545 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:31:45,547 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:31:45,548 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:31:45,548 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:31:45,548 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:31:45,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16ccf5f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:31:45,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2735da07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:31:45,661 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f932cc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/java.io.tmpdir/jetty-localhost-38253-hadoop-hdfs-3_4_1-tests_jar-_-any-18204275023416089931/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:31:45,662 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ff7780b{HTTP/1.1, (http/1.1)}{localhost:38253} 2024-11-20T04:31:45,662 INFO [Time-limited test {}] server.Server(415): Started @239740ms 2024-11-20T04:31:45,663 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T04:31:45,684 INFO [regionserver/c2a32e16c274:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:31:45,694 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:31:45,696 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:31:45,697 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:31:45,697 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:31:45,697 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T04:31:45,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25dcc129{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:31:45,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26a9d62d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:31:45,757 WARN [Thread-1951 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/cluster_68750bbf-b3ca-58c0-c5df-ac7d54614ae7/data/data1/current/BP-1347059229-172.17.0.2-1732077105287/current, will proceed with Du for space computation calculation, 2024-11-20T04:31:45,757 WARN [Thread-1952 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/cluster_68750bbf-b3ca-58c0-c5df-ac7d54614ae7/data/data2/current/BP-1347059229-172.17.0.2-1732077105287/current, will proceed with Du for space computation calculation, 2024-11-20T04:31:45,773 WARN [Thread-1930 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T04:31:45,775 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5fcc3d81fd9a934d with lease ID 0x5efa75d6ebfc62d6: Processing first storage report for DS-2e50bd0f-8c2e-4f8e-b277-b0afe58eedd8 from datanode DatanodeRegistration(127.0.0.1:43619, datanodeUuid=079a8ecb-ae18-47ab-b528-e7a0ef038e0d, infoPort=37567, infoSecurePort=0, ipcPort=38303, storageInfo=lv=-57;cid=testClusterID;nsid=1349542505;c=1732077105287) 2024-11-20T04:31:45,775 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5fcc3d81fd9a934d with lease ID 0x5efa75d6ebfc62d6: from storage DS-2e50bd0f-8c2e-4f8e-b277-b0afe58eedd8 node DatanodeRegistration(127.0.0.1:43619, datanodeUuid=079a8ecb-ae18-47ab-b528-e7a0ef038e0d, infoPort=37567, infoSecurePort=0, ipcPort=38303, storageInfo=lv=-57;cid=testClusterID;nsid=1349542505;c=1732077105287), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:31:45,776 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5fcc3d81fd9a934d with lease ID 0x5efa75d6ebfc62d6: Processing first storage report for DS-c949f629-4b90-427b-8f4d-0288ec8059d3 from datanode DatanodeRegistration(127.0.0.1:43619, datanodeUuid=079a8ecb-ae18-47ab-b528-e7a0ef038e0d, infoPort=37567, infoSecurePort=0, ipcPort=38303, storageInfo=lv=-57;cid=testClusterID;nsid=1349542505;c=1732077105287) 2024-11-20T04:31:45,776 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5fcc3d81fd9a934d with lease ID 0x5efa75d6ebfc62d6: from storage DS-c949f629-4b90-427b-8f4d-0288ec8059d3 node DatanodeRegistration(127.0.0.1:43619, datanodeUuid=079a8ecb-ae18-47ab-b528-e7a0ef038e0d, infoPort=37567, infoSecurePort=0, ipcPort=38303, storageInfo=lv=-57;cid=testClusterID;nsid=1349542505;c=1732077105287), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:31:45,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ae6275a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/java.io.tmpdir/jetty-localhost-42581-hadoop-hdfs-3_4_1-tests_jar-_-any-1364740555973313772/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:31:45,812 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a34980e{HTTP/1.1, (http/1.1)}{localhost:42581} 2024-11-20T04:31:45,812 INFO [Time-limited test {}] server.Server(415): Started @239891ms 2024-11-20T04:31:45,813 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-20T04:31:45,907 WARN [Thread-1977 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/cluster_68750bbf-b3ca-58c0-c5df-ac7d54614ae7/data/data3/current/BP-1347059229-172.17.0.2-1732077105287/current, will proceed with Du for space computation calculation, 2024-11-20T04:31:45,907 WARN [Thread-1978 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/cluster_68750bbf-b3ca-58c0-c5df-ac7d54614ae7/data/data4/current/BP-1347059229-172.17.0.2-1732077105287/current, will proceed with Du for space computation calculation, 2024-11-20T04:31:45,924 WARN [Thread-1966 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T04:31:45,926 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa72b1e66f7c169b1 with lease ID 0x5efa75d6ebfc62d7: Processing first storage report for DS-f8b961ab-c362-440e-b9d3-73c3419f746e from datanode DatanodeRegistration(127.0.0.1:41351, datanodeUuid=71d11684-2788-4c06-9f50-bd10d0468c2f, infoPort=37249, infoSecurePort=0, ipcPort=38647, storageInfo=lv=-57;cid=testClusterID;nsid=1349542505;c=1732077105287) 2024-11-20T04:31:45,926 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa72b1e66f7c169b1 with lease ID 0x5efa75d6ebfc62d7: from storage DS-f8b961ab-c362-440e-b9d3-73c3419f746e node DatanodeRegistration(127.0.0.1:41351, datanodeUuid=71d11684-2788-4c06-9f50-bd10d0468c2f, infoPort=37249, infoSecurePort=0, ipcPort=38647, storageInfo=lv=-57;cid=testClusterID;nsid=1349542505;c=1732077105287), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T04:31:45,926 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa72b1e66f7c169b1 with lease ID 0x5efa75d6ebfc62d7: Processing first storage report for DS-ae70970e-a417-4397-b889-9ba48d619bd4 from datanode DatanodeRegistration(127.0.0.1:41351, datanodeUuid=71d11684-2788-4c06-9f50-bd10d0468c2f, infoPort=37249, infoSecurePort=0, ipcPort=38647, storageInfo=lv=-57;cid=testClusterID;nsid=1349542505;c=1732077105287) 2024-11-20T04:31:45,926 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa72b1e66f7c169b1 with lease ID 0x5efa75d6ebfc62d7: from storage DS-ae70970e-a417-4397-b889-9ba48d619bd4 node DatanodeRegistration(127.0.0.1:41351, datanodeUuid=71d11684-2788-4c06-9f50-bd10d0468c2f, infoPort=37249, infoSecurePort=0, ipcPort=38647, storageInfo=lv=-57;cid=testClusterID;nsid=1349542505;c=1732077105287), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:31:45,935 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b 2024-11-20T04:31:45,937 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/cluster_68750bbf-b3ca-58c0-c5df-ac7d54614ae7/zookeeper_0, clientPort=50236, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/cluster_68750bbf-b3ca-58c0-c5df-ac7d54614ae7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/cluster_68750bbf-b3ca-58c0-c5df-ac7d54614ae7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T04:31:45,938 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50236 2024-11-20T04:31:45,938 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:31:45,939 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:31:45,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:31:45,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:31:45,948 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6 with version=8 2024-11-20T04:31:45,948 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/hbase-staging 2024-11-20T04:31:45,950 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:31:45,950 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:31:45,950 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:31:45,950 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:31:45,950 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:31:45,950 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:31:45,950 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T04:31:45,950 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:31:45,951 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39875 2024-11-20T04:31:45,952 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39875 connecting to ZooKeeper ensemble=127.0.0.1:50236 2024-11-20T04:31:45,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:398750x0, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:31:45,958 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39875-0x10133a51da00000 connected 2024-11-20T04:31:45,973 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:31:45,975 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:31:45,976 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:31:45,976 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6, hbase.cluster.distributed=false 2024-11-20T04:31:45,978 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:31:45,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39875 2024-11-20T04:31:45,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39875 2024-11-20T04:31:45,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39875 2024-11-20T04:31:45,979 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39875 2024-11-20T04:31:45,979 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39875 2024-11-20T04:31:45,993 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:31:45,993 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:31:45,994 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:31:45,994 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:31:45,994 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:31:45,994 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:31:45,994 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T04:31:45,994 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:31:45,994 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35439 2024-11-20T04:31:45,995 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35439 connecting to ZooKeeper ensemble=127.0.0.1:50236 2024-11-20T04:31:45,996 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:31:45,997 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:31:46,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:354390x0, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:31:46,001 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35439-0x10133a51da00001 connected 2024-11-20T04:31:46,001 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:31:46,001 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T04:31:46,002 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T04:31:46,002 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T04:31:46,003 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:31:46,003 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35439 2024-11-20T04:31:46,004 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35439 2024-11-20T04:31:46,004 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35439 2024-11-20T04:31:46,004 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35439 2024-11-20T04:31:46,005 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35439 
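Note: the ZKUtil lines above set watches on znodes such as /hbase/running, /hbase/master and /hbase/acl before those nodes exist. In the plain ZooKeeper client API this corresponds to an exists() call with a watch, which fires once the node is created. A small illustrative sketch with the stock org.apache.zookeeper client follows (ensemble address and paths taken from the log; this is not HBase's internal ZKWatcher).

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Ensemble address as logged: 127.0.0.1:50236, 30s session timeout.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50236", 30000, event -> { });

    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event " + event.getType() + " on " + event.getPath());

    // exists() registers the watch even when the znode is absent, which is
    // the "Set watcher on znode that does not yet exist" case in the log.
    if (zk.exists("/hbase/master", watcher) == null) {
      System.out.println("/hbase/master not created yet; watch armed");
    }

    Thread.sleep(10_000);   // wait for a NodeCreated event to arrive
    zk.close();
  }
}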
2024-11-20T04:31:46,016 DEBUG [M:0;c2a32e16c274:39875 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c2a32e16c274:39875 2024-11-20T04:31:46,016 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c2a32e16c274,39875,1732077105950 2024-11-20T04:31:46,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:31:46,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:31:46,018 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c2a32e16c274,39875,1732077105950 2024-11-20T04:31:46,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T04:31:46,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:46,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:46,023 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T04:31:46,023 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c2a32e16c274,39875,1732077105950 from backup master directory 2024-11-20T04:31:46,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c2a32e16c274,39875,1732077105950 2024-11-20T04:31:46,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:31:46,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:31:46,025 WARN [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
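Note: throughout the lines above, both the master (RPC port 39875) and the region server (RPC port 35439) coordinate purely through the ZooKeeper ensemble at 127.0.0.1:50236 rather than through fixed peer addresses, and HBase clients locate the cluster the same way. A minimal, illustrative client sketch against such a cluster follows; the table and column names are placeholders, not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The client only needs the ZooKeeper ensemble; master and region server
    // addresses are discovered through ZooKeeper, as in the log above.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "50236"); // client port from the log

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("exampletable"))) { // placeholder table
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(put);

      Result r = table.get(new Get(Bytes.toBytes("row1")));
      System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("info"), Bytes.toBytes("q"))));
    }
  }
}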
2024-11-20T04:31:46,025 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c2a32e16c274,39875,1732077105950 2024-11-20T04:31:46,029 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/hbase.id] with ID: cf509326-2a76-454b-aed7-9e82652b76be 2024-11-20T04:31:46,029 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/.tmp/hbase.id 2024-11-20T04:31:46,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741826_1002 (size=42) 2024-11-20T04:31:46,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741826_1002 (size=42) 2024-11-20T04:31:46,035 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/.tmp/hbase.id]:[hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/hbase.id] 2024-11-20T04:31:46,045 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:31:46,045 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T04:31:46,046 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
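Note: the FSUtils entries just above write the new cluster ID to a temporary file under .tmp and then move it to its final hbase.id location, so readers never observe a half-written file. A small sketch of that write-then-rename pattern with the stock Hadoop FileSystem API follows (the paths are illustrative, not the test's actual directories).

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // fs.defaultFS should point at the target HDFS, e.g. hdfs://localhost:36491 in the log.
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");   // temporary location
    Path dst = new Path("/user/jenkins/test-data/hbase.id");        // final location

    // Write the content fully to the temporary path first.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("cf509326-2a76-454b-aed7-9e82652b76be".getBytes(StandardCharsets.UTF_8));
    }

    // The rename is a namespace operation, so the id file appears at its
    // final path only once its contents are complete.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename " + tmp + " -> " + dst + " failed");
    }
  }
}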
2024-11-20T04:31:46,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:46,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:46,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:31:46,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:31:46,054 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T04:31:46,055 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T04:31:46,055 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:31:46,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741828_1004 (size=1189) 2024-11-20T04:31:46,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741828_1004 (size=1189) 2024-11-20T04:31:46,062 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store 2024-11-20T04:31:46,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:31:46,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:31:46,068 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:31:46,068 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T04:31:46,068 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:31:46,068 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:31:46,068 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T04:31:46,068 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:31:46,068 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
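Note: the master:store descriptor logged above defines four column families (info, proc, rs, state) with per-family tunings such as VERSIONS => '3', IN_MEMORY => 'true', BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1' and an 8 KB block size for 'info'. For comparison, the sketch below shows roughly how such a family is expressed with the public descriptor builders; it is illustrative only, not the code HBase itself uses to build master:store.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the logged 'info' family of master:store: 3 versions, in-memory,
    // ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .build();

    // 'exampletable' is a placeholder name; master:store itself is a
    // master-local region, not a user table created through the client API.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("exampletable"))
        .setColumnFamily(info)
        .build();

    System.out.println(td);
  }
}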
2024-11-20T04:31:46,068 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732077106068Disabling compacts and flushes for region at 1732077106068Disabling writes for close at 1732077106068Writing region close event to WAL at 1732077106068Closed at 1732077106068 2024-11-20T04:31:46,069 WARN [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/.initializing 2024-11-20T04:31:46,069 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/WALs/c2a32e16c274,39875,1732077105950 2024-11-20T04:31:46,071 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C39875%2C1732077105950, suffix=, logDir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/WALs/c2a32e16c274,39875,1732077105950, archiveDir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/oldWALs, maxLogs=10 2024-11-20T04:31:46,071 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C39875%2C1732077105950.1732077106071 2024-11-20T04:31:46,075 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/WALs/c2a32e16c274,39875,1732077105950/c2a32e16c274%2C39875%2C1732077105950.1732077106071 2024-11-20T04:31:46,076 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37567:37567),(127.0.0.1/127.0.0.1:37249:37249)] 2024-11-20T04:31:46,076 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:31:46,077 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:31:46,077 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:31:46,077 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:31:46,078 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:31:46,079 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T04:31:46,079 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:46,079 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:31:46,079 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:31:46,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T04:31:46,080 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:46,081 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:31:46,081 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:31:46,082 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T04:31:46,082 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:46,082 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:31:46,082 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:31:46,083 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T04:31:46,083 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:46,084 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:31:46,084 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:31:46,084 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:31:46,085 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:31:46,086 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:31:46,086 DEBUG [master/c2a32e16c274:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:31:46,086 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T04:31:46,087 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:31:46,089 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:31:46,089 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769465, jitterRate=-0.021575316786766052}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T04:31:46,090 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732077106077Initializing all the Stores at 1732077106077Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077106077Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077106077Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077106077Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077106077Cleaning up temporary data from old regions at 1732077106086 (+9 ms)Region opened successfully at 1732077106090 (+4 ms) 2024-11-20T04:31:46,090 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T04:31:46,092 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d227a83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:31:46,093 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T04:31:46,093 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T04:31:46,093 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T04:31:46,093 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T04:31:46,094 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T04:31:46,094 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T04:31:46,094 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T04:31:46,096 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T04:31:46,097 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T04:31:46,098 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T04:31:46,098 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T04:31:46,099 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T04:31:46,102 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T04:31:46,103 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T04:31:46,103 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T04:31:46,104 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T04:31:46,105 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T04:31:46,106 DEBUG 
[master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T04:31:46,107 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T04:31:46,108 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T04:31:46,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T04:31:46,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T04:31:46,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:46,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:46,112 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c2a32e16c274,39875,1732077105950, sessionid=0x10133a51da00000, setting cluster-up flag (Was=false) 2024-11-20T04:31:46,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:46,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:46,120 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T04:31:46,121 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2a32e16c274,39875,1732077105950 2024-11-20T04:31:46,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:46,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:46,132 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T04:31:46,133 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2a32e16c274,39875,1732077105950 2024-11-20T04:31:46,134 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T04:31:46,136 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T04:31:46,136 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T04:31:46,136 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T04:31:46,137 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c2a32e16c274,39875,1732077105950 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T04:31:46,138 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:31:46,138 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:31:46,138 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:31:46,138 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:31:46,138 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c2a32e16c274:0, corePoolSize=10, maxPoolSize=10 2024-11-20T04:31:46,138 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:31:46,138 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:31:46,138 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c2a32e16c274:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T04:31:46,140 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:31:46,140 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T04:31:46,141 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:46,141 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T04:31:46,143 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732077136143 2024-11-20T04:31:46,143 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T04:31:46,143 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T04:31:46,143 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T04:31:46,143 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T04:31:46,143 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T04:31:46,143 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T04:31:46,144 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,146 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T04:31:46,146 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T04:31:46,146 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T04:31:46,148 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T04:31:46,148 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T04:31:46,148 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732077106148,5,FailOnTimeoutGroup] 2024-11-20T04:31:46,148 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732077106148,5,FailOnTimeoutGroup] 2024-11-20T04:31:46,148 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,148 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T04:31:46,148 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,148 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
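
Illustrative aside: the LogsCleaner and HFileCleaner chores above both run on a 600000 ms (10 minute) period. A hedged sketch of tuning that interval through the configuration API; the key name hbase.master.cleaner.interval is an assumption taken from the standard HBase configuration and does not appear anywhere in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerIntervalSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Both cleaner chores above report period=600000 ms; shorten to 5 minutes as an example.
        // Key name is an assumption, not printed in this log.
        conf.setLong("hbase.master.cleaner.interval", 300_000L);
        System.out.println(conf.getLong("hbase.master.cleaner.interval", -1L));
      }
    }
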
2024-11-20T04:31:46,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:31:46,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:31:46,150 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T04:31:46,151 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6 2024-11-20T04:31:46,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741832_1008 (size=32) 2024-11-20T04:31:46,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741832_1008 (size=32) 2024-11-20T04:31:46,160 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:31:46,161 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T04:31:46,162 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T04:31:46,162 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:46,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:31:46,163 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T04:31:46,164 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T04:31:46,164 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:46,164 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:31:46,164 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T04:31:46,165 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T04:31:46,165 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:46,166 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:31:46,166 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T04:31:46,167 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T04:31:46,167 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:46,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:31:46,167 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T04:31:46,168 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740 2024-11-20T04:31:46,168 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740 2024-11-20T04:31:46,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T04:31:46,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T04:31:46,169 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
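
Worked aside: the FlushLargeStoresPolicy fallback above divides the region's memstore flush heap size evenly across its column families. hbase:meta has four families here (info, ns, rep_barrier, table), and the logged 16.0 M result implies a 64 MB flush size for this region; the 64 MB figure is inferred from the output, not printed in it. The earlier master:store case (32.0 M across info, proc, rs and state) works the same way from 128 MB.

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        // Assumption: 64 MB memstore flush heap size for hbase:meta, inferred from the logged 16.0 M.
        long metaFlushHeapSize = 64L * 1024 * 1024;           // 67108864 bytes
        int metaFamilies = 4;                                 // info, ns, rep_barrier, table
        System.out.println(metaFlushHeapSize / metaFamilies); // 16777216 = 16 MB

        // master:store: 128 MB flush size across 4 families (info, proc, rs, state).
        long storeFlushHeapSize = 128L * 1024 * 1024;         // 134217728 bytes
        System.out.println(storeFlushHeapSize / 4);           // 33554432 = 32 MB, as logged earlier
      }
    }
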
2024-11-20T04:31:46,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T04:31:46,172 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:31:46,172 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=880397, jitterRate=0.11948315799236298}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T04:31:46,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732077106160Initializing all the Stores at 1732077106160Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077106160Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077106160Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077106160Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077106160Cleaning up temporary data from old regions at 1732077106169 (+9 ms)Region opened successfully at 1732077106172 (+3 ms) 2024-11-20T04:31:46,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T04:31:46,172 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T04:31:46,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T04:31:46,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T04:31:46,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T04:31:46,173 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T04:31:46,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732077106172Disabling compacts and flushes for region at 1732077106172Disabling writes for close at 1732077106173 (+1 ms)Writing region close 
event to WAL at 1732077106173Closed at 1732077106173 2024-11-20T04:31:46,174 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:31:46,174 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T04:31:46,174 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T04:31:46,175 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T04:31:46,177 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T04:31:46,206 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(746): ClusterId : cf509326-2a76-454b-aed7-9e82652b76be 2024-11-20T04:31:46,206 DEBUG [RS:0;c2a32e16c274:35439 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T04:31:46,209 DEBUG [RS:0;c2a32e16c274:35439 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T04:31:46,209 DEBUG [RS:0;c2a32e16c274:35439 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T04:31:46,212 DEBUG [RS:0;c2a32e16c274:35439 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T04:31:46,212 DEBUG [RS:0;c2a32e16c274:35439 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@581a9cd1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:31:46,224 DEBUG [RS:0;c2a32e16c274:35439 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c2a32e16c274:35439 2024-11-20T04:31:46,224 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T04:31:46,224 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T04:31:46,224 DEBUG [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(832): About to register with Master. 
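
Worked aside: the two "Opened ... SteppingSplitPolicy" lines above (for master:store and for hbase:meta) print a desiredMaxFileSize that already includes a random jitter, i.e. final size = base size + (long)(base size * jitterRate). Both logged values reverse-engineer to the same base of 786432 bytes (768 KB); that base is an inference from the numbers, not a value shown in the log.

    public class SplitSizeJitterSketch {
      public static void main(String[] args) {
        // Assumption: base desiredMaxFileSize of 786432 bytes, inferred from both logged results.
        long base = 786_432L;

        // master:store: jitterRate=-0.021575316786766052 -> logged desiredMaxFileSize=769465
        System.out.println(base + (long) (base * -0.021575316786766052));

        // hbase:meta: jitterRate=0.11948315799236298 -> logged desiredMaxFileSize=880397
        System.out.println(base + (long) (base * 0.11948315799236298));
      }
    }
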
2024-11-20T04:31:46,225 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(2659): reportForDuty to master=c2a32e16c274,39875,1732077105950 with port=35439, startcode=1732077105993 2024-11-20T04:31:46,225 DEBUG [RS:0;c2a32e16c274:35439 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T04:31:46,227 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35517, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T04:31:46,227 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39875 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c2a32e16c274,35439,1732077105993 2024-11-20T04:31:46,227 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39875 {}] master.ServerManager(517): Registering regionserver=c2a32e16c274,35439,1732077105993 2024-11-20T04:31:46,229 DEBUG [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6 2024-11-20T04:31:46,229 DEBUG [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36491 2024-11-20T04:31:46,229 DEBUG [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T04:31:46,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:31:46,231 DEBUG [RS:0;c2a32e16c274:35439 {}] zookeeper.ZKUtil(111): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c2a32e16c274,35439,1732077105993 2024-11-20T04:31:46,231 WARN [RS:0;c2a32e16c274:35439 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T04:31:46,231 INFO [RS:0;c2a32e16c274:35439 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:31:46,231 DEBUG [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/WALs/c2a32e16c274,35439,1732077105993 2024-11-20T04:31:46,231 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c2a32e16c274,35439,1732077105993] 2024-11-20T04:31:46,234 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T04:31:46,236 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T04:31:46,236 INFO [RS:0;c2a32e16c274:35439 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T04:31:46,236 INFO [RS:0;c2a32e16c274:35439 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
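
Illustrative aside: the MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. the low mark is 95% of the global limit (836 / 880 = 0.95); the 0.95 factor is inferred from those two numbers, it is not printed in the log. A minimal sketch of that relationship:

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        long globalMemStoreLimitMb = 880;                     // from the MemStoreFlusher line above
        long lowMarkMb = globalMemStoreLimitMb * 95 / 100;    // inferred 95% factor
        System.out.println(lowMarkMb);                        // 836, matching the logged lowMark
      }
    }
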
2024-11-20T04:31:46,236 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T04:31:46,237 INFO [RS:0;c2a32e16c274:35439 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T04:31:46,237 INFO [RS:0;c2a32e16c274:35439 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,237 DEBUG [RS:0;c2a32e16c274:35439 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:31:46,237 DEBUG [RS:0;c2a32e16c274:35439 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:31:46,237 DEBUG [RS:0;c2a32e16c274:35439 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:31:46,237 DEBUG [RS:0;c2a32e16c274:35439 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:31:46,237 DEBUG [RS:0;c2a32e16c274:35439 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:31:46,237 DEBUG [RS:0;c2a32e16c274:35439 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:31:46,237 DEBUG [RS:0;c2a32e16c274:35439 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:31:46,237 DEBUG [RS:0;c2a32e16c274:35439 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:31:46,237 DEBUG [RS:0;c2a32e16c274:35439 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:31:46,237 DEBUG [RS:0;c2a32e16c274:35439 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:31:46,237 DEBUG [RS:0;c2a32e16c274:35439 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:31:46,237 DEBUG [RS:0;c2a32e16c274:35439 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:31:46,237 DEBUG [RS:0;c2a32e16c274:35439 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:31:46,237 DEBUG [RS:0;c2a32e16c274:35439 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:31:46,240 INFO [RS:0;c2a32e16c274:35439 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
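
Illustrative aside: the executor services listed above are bounded thread pools keyed by event type (open region, close region, log replay, and so on), each with the logged corePoolSize/maxPoolSize. A hedged, JDK-only sketch of an analogous bounded pool; this uses java.util.concurrent directly and is not the org.apache.hadoop.hbase.executor.ExecutorService the log refers to:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class BoundedExecutorSketch {
      public static void main(String[] args) throws InterruptedException {
        // Analogous to RS_OPEN_REGION above: corePoolSize=1, maxPoolSize=1.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.allowCoreThreadTimeOut(true);
        openRegionPool.execute(() -> System.out.println("open region task"));
        openRegionPool.shutdown();
        openRegionPool.awaitTermination(5, TimeUnit.SECONDS);
      }
    }
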
2024-11-20T04:31:46,240 INFO [RS:0;c2a32e16c274:35439 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,240 INFO [RS:0;c2a32e16c274:35439 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,240 INFO [RS:0;c2a32e16c274:35439 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,240 INFO [RS:0;c2a32e16c274:35439 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,240 INFO [RS:0;c2a32e16c274:35439 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,35439,1732077105993-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T04:31:46,255 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T04:31:46,255 INFO [RS:0;c2a32e16c274:35439 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,35439,1732077105993-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,255 INFO [RS:0;c2a32e16c274:35439 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,255 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.Replication(171): c2a32e16c274,35439,1732077105993 started 2024-11-20T04:31:46,269 INFO [RS:0;c2a32e16c274:35439 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,269 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(1482): Serving as c2a32e16c274,35439,1732077105993, RpcServer on c2a32e16c274/172.17.0.2:35439, sessionid=0x10133a51da00001 2024-11-20T04:31:46,269 DEBUG [RS:0;c2a32e16c274:35439 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T04:31:46,269 DEBUG [RS:0;c2a32e16c274:35439 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c2a32e16c274,35439,1732077105993 2024-11-20T04:31:46,269 DEBUG [RS:0;c2a32e16c274:35439 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,35439,1732077105993' 2024-11-20T04:31:46,269 DEBUG [RS:0;c2a32e16c274:35439 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T04:31:46,270 DEBUG [RS:0;c2a32e16c274:35439 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T04:31:46,270 DEBUG [RS:0;c2a32e16c274:35439 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T04:31:46,270 DEBUG [RS:0;c2a32e16c274:35439 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T04:31:46,270 DEBUG [RS:0;c2a32e16c274:35439 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c2a32e16c274,35439,1732077105993 2024-11-20T04:31:46,270 DEBUG [RS:0;c2a32e16c274:35439 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,35439,1732077105993' 2024-11-20T04:31:46,270 DEBUG [RS:0;c2a32e16c274:35439 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T04:31:46,271 DEBUG 
[RS:0;c2a32e16c274:35439 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T04:31:46,271 DEBUG [RS:0;c2a32e16c274:35439 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T04:31:46,271 INFO [RS:0;c2a32e16c274:35439 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T04:31:46,271 INFO [RS:0;c2a32e16c274:35439 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T04:31:46,327 WARN [c2a32e16c274:39875 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-20T04:31:46,373 INFO [RS:0;c2a32e16c274:35439 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C35439%2C1732077105993, suffix=, logDir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/WALs/c2a32e16c274,35439,1732077105993, archiveDir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/oldWALs, maxLogs=32 2024-11-20T04:31:46,373 INFO [RS:0;c2a32e16c274:35439 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C35439%2C1732077105993.1732077106373 2024-11-20T04:31:46,379 INFO [RS:0;c2a32e16c274:35439 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/WALs/c2a32e16c274,35439,1732077105993/c2a32e16c274%2C35439%2C1732077105993.1732077106373 2024-11-20T04:31:46,380 DEBUG [RS:0;c2a32e16c274:35439 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37249:37249),(127.0.0.1/127.0.0.1:37567:37567)] 2024-11-20T04:31:46,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:46,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:31:46,577 DEBUG [c2a32e16c274:39875 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T04:31:46,578 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c2a32e16c274,35439,1732077105993 2024-11-20T04:31:46,579 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2a32e16c274,35439,1732077105993, state=OPENING 2024-11-20T04:31:46,580 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T04:31:46,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:46,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:31:46,583 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T04:31:46,583 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:31:46,583 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c2a32e16c274,35439,1732077105993}] 2024-11-20T04:31:46,583 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:31:46,736 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T04:31:46,738 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48843, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T04:31:46,741 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T04:31:46,741 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:31:46,743 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C35439%2C1732077105993.meta, suffix=.meta, logDir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/WALs/c2a32e16c274,35439,1732077105993, archiveDir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/oldWALs, maxLogs=32 2024-11-20T04:31:46,743 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C35439%2C1732077105993.meta.1732077106743.meta 2024-11-20T04:31:46,750 INFO 
[RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/WALs/c2a32e16c274,35439,1732077105993/c2a32e16c274%2C35439%2C1732077105993.meta.1732077106743.meta 2024-11-20T04:31:46,756 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37249:37249),(127.0.0.1/127.0.0.1:37567:37567)] 2024-11-20T04:31:46,761 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:31:46,761 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T04:31:46,761 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T04:31:46,761 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T04:31:46,761 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T04:31:46,761 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:31:46,761 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T04:31:46,761 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T04:31:46,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T04:31:46,763 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T04:31:46,763 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:46,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:31:46,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T04:31:46,764 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T04:31:46,764 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:46,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:31:46,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T04:31:46,765 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T04:31:46,765 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:46,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:31:46,766 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T04:31:46,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T04:31:46,766 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:46,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:31:46,767 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T04:31:46,767 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740 2024-11-20T04:31:46,768 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740 2024-11-20T04:31:46,769 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T04:31:46,769 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T04:31:46,770 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T04:31:46,771 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T04:31:46,771 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794633, jitterRate=0.010428309440612793}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T04:31:46,771 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T04:31:46,772 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732077106761Writing region info on filesystem at 1732077106761Initializing all the Stores at 1732077106762 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077106762Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077106762Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077106762Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077106762Cleaning up temporary data from old regions at 1732077106769 (+7 ms)Running coprocessor post-open hooks at 1732077106771 (+2 ms)Region opened successfully at 1732077106772 (+1 ms) 2024-11-20T04:31:46,773 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732077106736 2024-11-20T04:31:46,775 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T04:31:46,775 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T04:31:46,776 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=c2a32e16c274,35439,1732077105993 2024-11-20T04:31:46,776 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2a32e16c274,35439,1732077105993, state=OPEN 2024-11-20T04:31:46,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T04:31:46,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T04:31:46,783 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c2a32e16c274,35439,1732077105993 2024-11-20T04:31:46,783 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:31:46,783 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:31:46,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T04:31:46,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c2a32e16c274,35439,1732077105993 in 200 msec 2024-11-20T04:31:46,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T04:31:46,788 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 611 msec 2024-11-20T04:31:46,788 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:31:46,788 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T04:31:46,790 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T04:31:46,790 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2a32e16c274,35439,1732077105993, seqNum=-1] 2024-11-20T04:31:46,790 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T04:31:46,791 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58671, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T04:31:46,796 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 660 msec 2024-11-20T04:31:46,796 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732077106796, completionTime=-1 2024-11-20T04:31:46,796 INFO 
[master/c2a32e16c274:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T04:31:46,796 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T04:31:46,798 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T04:31:46,798 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732077166798 2024-11-20T04:31:46,798 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732077226798 2024-11-20T04:31:46,798 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-20T04:31:46,798 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,39875,1732077105950-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,798 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,39875,1732077105950-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,798 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,39875,1732077105950-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,798 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c2a32e16c274:39875, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,798 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,798 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,800 DEBUG [master/c2a32e16c274:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T04:31:46,802 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.777sec 2024-11-20T04:31:46,802 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T04:31:46,802 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T04:31:46,802 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T04:31:46,802 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-20T04:31:46,802 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T04:31:46,802 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,39875,1732077105950-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T04:31:46,802 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,39875,1732077105950-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T04:31:46,804 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T04:31:46,804 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T04:31:46,804 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,39875,1732077105950-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:31:46,806 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b81e396, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:31:46,806 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c2a32e16c274,39875,-1 for getting cluster id 2024-11-20T04:31:46,806 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T04:31:46,808 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cf509326-2a76-454b-aed7-9e82652b76be' 2024-11-20T04:31:46,808 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T04:31:46,808 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cf509326-2a76-454b-aed7-9e82652b76be" 2024-11-20T04:31:46,808 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@99463c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:31:46,808 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c2a32e16c274,39875,-1] 2024-11-20T04:31:46,809 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T04:31:46,809 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:31:46,810 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41898, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T04:31:46,810 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@196c243a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:31:46,810 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T04:31:46,811 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2a32e16c274,35439,1732077105993, seqNum=-1] 2024-11-20T04:31:46,812 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T04:31:46,812 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52690, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T04:31:46,814 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c2a32e16c274,39875,1732077105950 2024-11-20T04:31:46,814 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:31:46,816 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T04:31:46,817 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T04:31:46,817 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is c2a32e16c274,39875,1732077105950 2024-11-20T04:31:46,817 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@75253dee 2024-11-20T04:31:46,818 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T04:31:46,819 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41906, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T04:31:46,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39875 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T04:31:46,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39875 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-20T04:31:46,819 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39875 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T04:31:46,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39875 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-20T04:31:46,822 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T04:31:46,822 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:46,822 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39875 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-20T04:31:46,823 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T04:31:46,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39875 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T04:31:46,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741835_1011 (size=381) 2024-11-20T04:31:46,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741835_1011 (size=381) 2024-11-20T04:31:46,833 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f5df11b7e0731dc76c8b1dee4b367d32, NAME => 'TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6 2024-11-20T04:31:46,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741836_1012 (size=64) 2024-11-20T04:31:46,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741836_1012 (size=64) 2024-11-20T04:31:46,839 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:31:46,839 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing f5df11b7e0731dc76c8b1dee4b367d32, disabling compactions & flushes 2024-11-20T04:31:46,839 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 2024-11-20T04:31:46,839 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 2024-11-20T04:31:46,839 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. after waiting 0 ms 2024-11-20T04:31:46,839 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 2024-11-20T04:31:46,839 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 2024-11-20T04:31:46,839 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for f5df11b7e0731dc76c8b1dee4b367d32: Waiting for close lock at 1732077106839Disabling compacts and flushes for region at 1732077106839Disabling writes for close at 1732077106839Writing region close event to WAL at 1732077106839Closed at 1732077106839 2024-11-20T04:31:46,841 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T04:31:46,841 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732077106841"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732077106841"}]},"ts":"1732077106841"} 2024-11-20T04:31:46,844 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-20T04:31:46,845 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T04:31:46,845 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732077106845"}]},"ts":"1732077106845"} 2024-11-20T04:31:46,847 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-20T04:31:46,847 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5df11b7e0731dc76c8b1dee4b367d32, ASSIGN}] 2024-11-20T04:31:46,849 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5df11b7e0731dc76c8b1dee4b367d32, ASSIGN 2024-11-20T04:31:46,850 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5df11b7e0731dc76c8b1dee4b367d32, ASSIGN; state=OFFLINE, location=c2a32e16c274,35439,1732077105993; forceNewPlan=false, retain=false 2024-11-20T04:31:47,000 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f5df11b7e0731dc76c8b1dee4b367d32, regionState=OPENING, regionLocation=c2a32e16c274,35439,1732077105993 2024-11-20T04:31:47,003 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5df11b7e0731dc76c8b1dee4b367d32, ASSIGN because future has completed 2024-11-20T04:31:47,003 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f5df11b7e0731dc76c8b1dee4b367d32, server=c2a32e16c274,35439,1732077105993}] 2024-11-20T04:31:47,159 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 
2024-11-20T04:31:47,159 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f5df11b7e0731dc76c8b1dee4b367d32, NAME => 'TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:31:47,160 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:31:47,160 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:31:47,160 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:31:47,160 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:31:47,161 INFO [StoreOpener-f5df11b7e0731dc76c8b1dee4b367d32-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:31:47,162 INFO [StoreOpener-f5df11b7e0731dc76c8b1dee4b367d32-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f5df11b7e0731dc76c8b1dee4b367d32 columnFamilyName info 2024-11-20T04:31:47,163 DEBUG [StoreOpener-f5df11b7e0731dc76c8b1dee4b367d32-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:31:47,163 INFO [StoreOpener-f5df11b7e0731dc76c8b1dee4b367d32-1 {}] regionserver.HStore(327): Store=f5df11b7e0731dc76c8b1dee4b367d32/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:31:47,163 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:31:47,164 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:31:47,164 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:31:47,164 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:31:47,164 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:31:47,165 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:31:47,167 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:31:47,167 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened f5df11b7e0731dc76c8b1dee4b367d32; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=796095, jitterRate=0.012288063764572144}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T04:31:47,167 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:31:47,168 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f5df11b7e0731dc76c8b1dee4b367d32: Running coprocessor pre-open hook at 1732077107160Writing region info on filesystem at 1732077107160Initializing all the Stores at 1732077107161 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077107161Cleaning up temporary data from old regions at 1732077107164 (+3 ms)Running coprocessor post-open hooks at 1732077107167 (+3 ms)Region opened successfully at 1732077107168 (+1 ms) 2024-11-20T04:31:47,169 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32., pid=6, masterSystemTime=1732077107156 2024-11-20T04:31:47,171 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 
2024-11-20T04:31:47,171 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 2024-11-20T04:31:47,172 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f5df11b7e0731dc76c8b1dee4b367d32, regionState=OPEN, openSeqNum=2, regionLocation=c2a32e16c274,35439,1732077105993 2024-11-20T04:31:47,174 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f5df11b7e0731dc76c8b1dee4b367d32, server=c2a32e16c274,35439,1732077105993 because future has completed 2024-11-20T04:31:47,177 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T04:31:47,177 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f5df11b7e0731dc76c8b1dee4b367d32, server=c2a32e16c274,35439,1732077105993 in 172 msec 2024-11-20T04:31:47,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T04:31:47,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5df11b7e0731dc76c8b1dee4b367d32, ASSIGN in 330 msec 2024-11-20T04:31:47,180 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T04:31:47,180 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732077107180"}]},"ts":"1732077107180"} 2024-11-20T04:31:47,182 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-20T04:31:47,183 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T04:31:47,184 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 364 msec 2024-11-20T04:31:47,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:47,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:48,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:48,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:49,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:49,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:49,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:49,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,324 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T04:31:50,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,345 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,345 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,345 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,345 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,345 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:50,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:50,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:51,223 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-20T04:31:51,223 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-20T04:31:51,224 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T04:31:51,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:51,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:31:52,234 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T04:31:52,235 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-20T04:31:52,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:52,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:53,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:53,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:54,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:54,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:55,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:55,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:56,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:56,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:56,726 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T04:31:56,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,749 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,749 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,749 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,749 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,750 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,753 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,753 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,753 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T04:31:56,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:31:56,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39875 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-20T04:31:56,882 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-20T04:31:56,882 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-20T04:31:56,884 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-20T04:31:56,884 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.
2024-11-20T04:31:56,887 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32., hostname=c2a32e16c274,35439,1732077105993, seqNum=2]
2024-11-20T04:31:56,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on f5df11b7e0731dc76c8b1dee4b367d32
2024-11-20T04:31:56,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5df11b7e0731dc76c8b1dee4b367d32 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-20T04:31:56,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/9b86c2f446d64716a29b65d83b62fae6 is 1080, key is row0001/info:/1732077116887/Put/seqid=0
2024-11-20T04:31:56,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741837_1013 (size=12509)
2024-11-20T04:31:56,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741837_1013 (size=12509)
2024-11-20T04:31:56,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/9b86c2f446d64716a29b65d83b62fae6
2024-11-20T04:31:56,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/9b86c2f446d64716a29b65d83b62fae6 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/9b86c2f446d64716a29b65d83b62fae6
2024-11-20T04:31:56,933 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/9b86c2f446d64716a29b65d83b62fae6, entries=7, sequenceid=11, filesize=12.2 K
2024-11-20T04:31:56,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for f5df11b7e0731dc76c8b1dee4b367d32 in 37ms, sequenceid=11, compaction requested=false
2024-11-20T04:31:56,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5df11b7e0731dc76c8b1dee4b367d32:
2024-11-20T04:31:56,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on f5df11b7e0731dc76c8b1dee4b367d32
2024-11-20T04:31:56,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5df11b7e0731dc76c8b1dee4b367d32 1/1 column families, dataSize=23.12 KB heapSize=25 KB
2024-11-20T04:31:56,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/89b7e4dfc5794e3bab9ea716e36d7e61 is 1080, key is row0008/info:/1732077116898/Put/seqid=0
2024-11-20T04:31:56,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741838_1014 (size=28684)
2024-11-20T04:31:56,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741838_1014 (size=28684)
2024-11-20T04:31:56,945 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=36 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/89b7e4dfc5794e3bab9ea716e36d7e61
2024-11-20T04:31:56,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/89b7e4dfc5794e3bab9ea716e36d7e61 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/89b7e4dfc5794e3bab9ea716e36d7e61
2024-11-20T04:31:56,956 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/89b7e4dfc5794e3bab9ea716e36d7e61, entries=22, sequenceid=36, filesize=28.0 K
2024-11-20T04:31:56,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=3.15 KB/3228 for f5df11b7e0731dc76c8b1dee4b367d32 in 22ms, sequenceid=36, compaction requested=false
2024-11-20T04:31:56,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5df11b7e0731dc76c8b1dee4b367d32:
2024-11-20T04:31:56,957 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.2 K, sizeToCheck=16.0 K
2024-11-20T04:31:56,957 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-20T04:31:56,958 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/89b7e4dfc5794e3bab9ea716e36d7e61 because midkey is the same as first or last row
2024-11-20T04:31:57,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-20T04:31:57,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:58,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:58,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:31:58,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:31:58,948 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5df11b7e0731dc76c8b1dee4b367d32 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T04:31:58,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/d63e5cab4c7d41d0963f70b4b2ca194e is 1080, key is row0030/info:/1732077116936/Put/seqid=0 2024-11-20T04:31:58,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741839_1015 (size=12509) 2024-11-20T04:31:58,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741839_1015 (size=12509) 2024-11-20T04:31:58,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=46 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/d63e5cab4c7d41d0963f70b4b2ca194e 2024-11-20T04:31:58,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/d63e5cab4c7d41d0963f70b4b2ca194e as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/d63e5cab4c7d41d0963f70b4b2ca194e 2024-11-20T04:31:58,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/d63e5cab4c7d41d0963f70b4b2ca194e, entries=7, sequenceid=46, filesize=12.2 K 2024-11-20T04:31:58,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for f5df11b7e0731dc76c8b1dee4b367d32 in 24ms, sequenceid=46, compaction requested=true 2024-11-20T04:31:58,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5df11b7e0731dc76c8b1dee4b367d32: 2024-11-20T04:31:58,971 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=52.4 K, sizeToCheck=16.0 K 2024-11-20T04:31:58,971 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:31:58,971 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/89b7e4dfc5794e3bab9ea716e36d7e61 because midkey is the same as first or last row 2024-11-20T04:31:58,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f5df11b7e0731dc76c8b1dee4b367d32:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-20T04:31:58,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:31:58,971 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T04:31:58,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:31:58,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5df11b7e0731dc76c8b1dee4b367d32 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-20T04:31:58,972 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53702 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T04:31:58,972 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1541): f5df11b7e0731dc76c8b1dee4b367d32/info is initiating minor compaction (all files) 2024-11-20T04:31:58,973 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f5df11b7e0731dc76c8b1dee4b367d32/info in TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 2024-11-20T04:31:58,973 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/9b86c2f446d64716a29b65d83b62fae6, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/89b7e4dfc5794e3bab9ea716e36d7e61, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/d63e5cab4c7d41d0963f70b4b2ca194e] into tmpdir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp, totalSize=52.4 K 2024-11-20T04:31:58,973 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9b86c2f446d64716a29b65d83b62fae6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732077116887 2024-11-20T04:31:58,974 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 89b7e4dfc5794e3bab9ea716e36d7e61, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=36, earliestPutTs=1732077116898 2024-11-20T04:31:58,974 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting d63e5cab4c7d41d0963f70b4b2ca194e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1732077116936 2024-11-20T04:31:58,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/2954386a2b16426c8d261f8020407ac0 is 1080, key is row0037/info:/1732077118948/Put/seqid=0 
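The Close-WAL-Writer-0 warnings above (and their repeats below) all share one root cause: the reflective call RecoverLeaseFSUtils makes to DistributedFileSystem.isFileClosed fails inside DFSClient.checkOpen because the underlying client has already been closed. The following is a minimal Java sketch of that failure mode only, assuming the Hadoop client libraries are on the classpath and using a hypothetical namenode URI and WAL path as stand-ins (the warnings above point at hdfs://localhost:40051 under the test data directory):

    // Minimal sketch (not from the test above) of why every retry logs "Filesystem closed":
    // once the DFSClient behind a DistributedFileSystem is closed, any call that passes
    // through DFSClient.checkOpen, including isFileClosed, throws immediately.
    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class FilesystemClosedSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical namenode URI and WAL path; stand-ins, not values from this log.
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:9000"), conf);
        Path wal = new Path("/user/jenkins/WALs/example-wal");

        dfs.close(); // mirrors the situation above: the client is gone before lease recovery runs

        try {
          dfs.isFileClosed(wal); // the same call RecoverLeaseFSUtils reaches via reflection
        } catch (IOException e) {
          System.out.println(e.getMessage()); // "Filesystem closed", as in the warnings above
        }
      }
    }

Because checkOpen fails before any RPC is attempted, the once-per-second retries seen above cannot succeed until a fresh FileSystem instance is obtained.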
2024-11-20T04:31:58,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741840_1016 (size=21141) 2024-11-20T04:31:58,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741840_1016 (size=21141) 2024-11-20T04:31:58,984 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/2954386a2b16426c8d261f8020407ac0 2024-11-20T04:31:58,988 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f5df11b7e0731dc76c8b1dee4b367d32#info#compaction#59 average throughput is 18.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T04:31:58,989 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/c1abcdf4165c4f4586d589ae6f7a1bde is 1080, key is row0001/info:/1732077116887/Put/seqid=0 2024-11-20T04:31:58,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/2954386a2b16426c8d261f8020407ac0 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/2954386a2b16426c8d261f8020407ac0 2024-11-20T04:31:58,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741841_1017 (size=43901) 2024-11-20T04:31:58,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741841_1017 (size=43901) 2024-11-20T04:31:58,997 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/2954386a2b16426c8d261f8020407ac0, entries=15, sequenceid=64, filesize=20.6 K 2024-11-20T04:31:58,998 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=13.66 KB/13988 for f5df11b7e0731dc76c8b1dee4b367d32 in 26ms, sequenceid=64, compaction requested=false 2024-11-20T04:31:58,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5df11b7e0731dc76c8b1dee4b367d32: 2024-11-20T04:31:58,998 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.1 K, sizeToCheck=16.0 K 2024-11-20T04:31:58,998 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:31:58,998 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/89b7e4dfc5794e3bab9ea716e36d7e61 because midkey is the same as first or last row 2024-11-20T04:31:58,999 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/c1abcdf4165c4f4586d589ae6f7a1bde as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/c1abcdf4165c4f4586d589ae6f7a1bde 2024-11-20T04:31:59,004 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f5df11b7e0731dc76c8b1dee4b367d32/info of f5df11b7e0731dc76c8b1dee4b367d32 into c1abcdf4165c4f4586d589ae6f7a1bde(size=42.9 K), total size for store is 63.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T04:31:59,004 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f5df11b7e0731dc76c8b1dee4b367d32: 2024-11-20T04:31:59,004 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32., storeName=f5df11b7e0731dc76c8b1dee4b367d32/info, priority=13, startTime=1732077118971; duration=0sec 2024-11-20T04:31:59,005 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-20T04:31:59,005 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:31:59,005 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/c1abcdf4165c4f4586d589ae6f7a1bde because midkey is the same as first or last row 2024-11-20T04:31:59,005 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-20T04:31:59,005 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:31:59,005 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/c1abcdf4165c4f4586d589ae6f7a1bde because midkey is the same as first or last row 2024-11-20T04:31:59,005 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-20T04:31:59,005 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:31:59,005 DEBUG 
[RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/c1abcdf4165c4f4586d589ae6f7a1bde because midkey is the same as first or last row 2024-11-20T04:31:59,005 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:31:59,005 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f5df11b7e0731dc76c8b1dee4b367d32:info 2024-11-20T04:31:59,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:31:59,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:00,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:00,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:32:00,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:32:00,998 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5df11b7e0731dc76c8b1dee4b367d32 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-20T04:32:01,003 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/569a0126e6ad43daa33789ac5f2cffd5 is 1080, key is row0052/info:/1732077118973/Put/seqid=0 2024-11-20T04:32:01,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741842_1018 (size=20064) 2024-11-20T04:32:01,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741842_1018 (size=20064) 2024-11-20T04:32:01,008 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/569a0126e6ad43daa33789ac5f2cffd5 2024-11-20T04:32:01,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/569a0126e6ad43daa33789ac5f2cffd5 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/569a0126e6ad43daa33789ac5f2cffd5 2024-11-20T04:32:01,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/569a0126e6ad43daa33789ac5f2cffd5, entries=14, sequenceid=82, filesize=19.6 K 2024-11-20T04:32:01,020 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for f5df11b7e0731dc76c8b1dee4b367d32 in 22ms, sequenceid=82, compaction requested=true 2024-11-20T04:32:01,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5df11b7e0731dc76c8b1dee4b367d32: 2024-11-20T04:32:01,021 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-20T04:32:01,021 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:32:01,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:32:01,021 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/c1abcdf4165c4f4586d589ae6f7a1bde because midkey is the same as first or last row 2024-11-20T04:32:01,021 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f5df11b7e0731dc76c8b1dee4b367d32:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T04:32:01,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:32:01,021 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T04:32:01,021 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5df11b7e0731dc76c8b1dee4b367d32 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-20T04:32:01,022 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T04:32:01,022 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1541): f5df11b7e0731dc76c8b1dee4b367d32/info is initiating minor compaction (all files) 2024-11-20T04:32:01,022 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f5df11b7e0731dc76c8b1dee4b367d32/info in TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 2024-11-20T04:32:01,022 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/c1abcdf4165c4f4586d589ae6f7a1bde, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/2954386a2b16426c8d261f8020407ac0, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/569a0126e6ad43daa33789ac5f2cffd5] into tmpdir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp, totalSize=83.1 K 2024-11-20T04:32:01,023 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting c1abcdf4165c4f4586d589ae6f7a1bde, keycount=36, bloomtype=ROW, size=42.9 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1732077116887 2024-11-20T04:32:01,023 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2954386a2b16426c8d261f8020407ac0, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1732077118948 2024-11-20T04:32:01,024 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 569a0126e6ad43daa33789ac5f2cffd5, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732077118973 2024-11-20T04:32:01,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/8e607ccf19454c75bce5c00547b983f4 is 1080, key is row0066/info:/1732077120999/Put/seqid=0 
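The split-policy DEBUG lines above record the same two-part decision after each flush or compaction: ConstantSizeRegionSplitPolicy finds the summed store file size well past sizeToCheck=16.0 K (IncreasingToUpperBoundRegionSplitPolicy reports regionsWithCommonTable=1, which keeps that threshold small), yet StoreUtils refuses to split because the candidate midkey equals the first or last row. The sketch below is a simplified, hypothetical paraphrase of that decision, with invented names and placeholder keys rather than the actual HBase policy classes:

    // Hypothetical, simplified sketch of the split decision the DEBUG lines above record;
    // names and key values are invented for illustration, not taken from HBase source.
    import java.util.Arrays;

    public final class SplitDecisionSketch {

      // "Should split because region size is big enough sumSize=..., sizeToCheck=16.0 K"
      static boolean bigEnough(long sumStoreFileBytes, long sizeToCheckBytes) {
        return sumStoreFileBytes > sizeToCheckBytes;
      }

      // "cannot split ... because midkey is the same as first or last row": splitting at a
      // boundary row would leave one daughter region empty, so such a midkey is unusable.
      static boolean midKeyUsable(byte[] midKey, byte[] firstKey, byte[] lastKey) {
        return midKey != null
            && !Arrays.equals(midKey, firstKey)
            && !Arrays.equals(midKey, lastKey);
      }

      public static void main(String[] args) {
        long sumSize = 83 * 1024;             // roughly the 83.1 K logged above
        long sizeToCheck = 16 * 1024;         // the 16.0 K logged above
        byte[] first = "row0001".getBytes();  // first row seen in the flushes above
        byte[] last = "rowNNNN".getBytes();   // placeholder; the real last row is not shown here
        byte[] mid = first;                   // illustrate the logged condition: midkey on a boundary
        boolean split = bigEnough(sumSize, sizeToCheck) && midKeyUsable(mid, first, last);
        System.out.println("split? " + split); // false, matching the repeated "cannot split" lines
      }
    }

Under these numbers the size check always passes, so the only thing holding the split back in these lines is the degenerate midkey; an explicit split request with splitKey=row0062 appears further below.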
2024-11-20T04:32:01,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741843_1019 (size=20064) 2024-11-20T04:32:01,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741843_1019 (size=20064) 2024-11-20T04:32:01,041 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f5df11b7e0731dc76c8b1dee4b367d32#info#compaction#62 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T04:32:01,041 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/6f6a8e9308f54fff8a77403e7ed15245 is 1080, key is row0001/info:/1732077116887/Put/seqid=0 2024-11-20T04:32:01,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741844_1020 (size=75378) 2024-11-20T04:32:01,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741844_1020 (size=75378) 2024-11-20T04:32:01,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=f5df11b7e0731dc76c8b1dee4b367d32, server=c2a32e16c274,35439,1732077105993 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-20T04:32:01,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:52690 deadline: 1732077131048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=f5df11b7e0731dc76c8b1dee4b367d32, server=c2a32e16c274,35439,1732077105993 2024-11-20T04:32:01,052 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/6f6a8e9308f54fff8a77403e7ed15245 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/6f6a8e9308f54fff8a77403e7ed15245 2024-11-20T04:32:01,059 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f5df11b7e0731dc76c8b1dee4b367d32/info of f5df11b7e0731dc76c8b1dee4b367d32 into 6f6a8e9308f54fff8a77403e7ed15245(size=73.6 K), total size for store is 73.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
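The RegionTooBusyException above is back pressure rather than a failure: the put is rejected in HRegion.checkResources once the region's memstore passes its blocking limit (32.0 K in this test), and the AsyncRegionLocatorHelper lines further below show the client keeping its cached location because the region has not moved. The sketch below illustrates that kind of admission check in a hedged, self-contained form; the class, fields, and bookkeeping are stand-ins for illustration, not the HBase implementation:

    // Hypothetical back-pressure sketch of what a check like HRegion.checkResources enforces;
    // all names, sizes, and accounting here are stand-ins, not HBase source.
    public final class MemStoreBackPressureSketch {

      static final class TooBusyException extends RuntimeException {
        TooBusyException(String msg) { super(msg); }
      }

      private final long blockingMemStoreBytes;
      private long memStoreBytes;

      MemStoreBackPressureSketch(long blockingMemStoreBytes) {
        this.blockingMemStoreBytes = blockingMemStoreBytes;
      }

      void put(byte[] row, byte[] value) {
        // Reject the write up front once the in-memory store has outgrown its blocking limit,
        // instead of letting unflushed data grow without bound while the flusher catches up.
        if (memStoreBytes > blockingMemStoreBytes) {
          throw new TooBusyException("Over memstore limit=" + blockingMemStoreBytes + " bytes");
        }
        memStoreBytes += row.length + value.length; // flushes (not modelled here) shrink this again
      }

      public static void main(String[] args) {
        MemStoreBackPressureSketch region = new MemStoreBackPressureSketch(32 * 1024); // 32.0 K, as logged
        try {
          for (int i = 0; i < 1000; i++) {
            region.put(String.format("row%04d", i).getBytes(), new byte[1024]);
          }
        } catch (TooBusyException e) {
          System.out.println("caller must back off and retry: " + e.getMessage());
        }
      }
    }

Rejecting the write up front keeps handler threads from queueing edits faster than the flusher (MemStoreFlusher.0 above) can drain them; the caller is expected to back off and retry the same region location.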
2024-11-20T04:32:01,059 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f5df11b7e0731dc76c8b1dee4b367d32: 2024-11-20T04:32:01,059 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32., storeName=f5df11b7e0731dc76c8b1dee4b367d32/info, priority=13, startTime=1732077121021; duration=0sec 2024-11-20T04:32:01,060 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-11-20T04:32:01,060 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:32:01,060 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-11-20T04:32:01,060 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:32:01,060 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-11-20T04:32:01,060 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T04:32:01,061 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:32:01,061 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:32:01,061 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f5df11b7e0731dc76c8b1dee4b367d32:info 2024-11-20T04:32:01,062 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39875 {}] assignment.AssignmentManager(1355): Split request from c2a32e16c274,35439,1732077105993, parent={ENCODED => f5df11b7e0731dc76c8b1dee4b367d32, NAME => 'TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-20T04:32:01,067 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39875 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=c2a32e16c274,35439,1732077105993 2024-11-20T04:32:01,073 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32., hostname=c2a32e16c274,35439,1732077105993, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32., hostname=c2a32e16c274,35439,1732077105993, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=f5df11b7e0731dc76c8b1dee4b367d32, server=c2a32e16c274,35439,1732077105993 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-20T04:32:01,074 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32., hostname=c2a32e16c274,35439,1732077105993, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=f5df11b7e0731dc76c8b1dee4b367d32, server=c2a32e16c274,35439,1732077105993 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-20T04:32:01,074 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32., hostname=c2a32e16c274,35439,1732077105993, seqNum=2 because the exception is null or not the one we care about 2024-11-20T04:32:01,073 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39875 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=f5df11b7e0731dc76c8b1dee4b367d32, daughterA=066e06c5293d73afd0b554a99369f641, daughterB=0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,074 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=f5df11b7e0731dc76c8b1dee4b367d32, daughterA=066e06c5293d73afd0b554a99369f641, daughterB=0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,074 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, 
parent=f5df11b7e0731dc76c8b1dee4b367d32, daughterA=066e06c5293d73afd0b554a99369f641, daughterB=0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,074 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=f5df11b7e0731dc76c8b1dee4b367d32, daughterA=066e06c5293d73afd0b554a99369f641, daughterB=0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,082 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5df11b7e0731dc76c8b1dee4b367d32, UNASSIGN}] 2024-11-20T04:32:01,083 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5df11b7e0731dc76c8b1dee4b367d32, UNASSIGN 2024-11-20T04:32:01,085 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=f5df11b7e0731dc76c8b1dee4b367d32, regionState=CLOSING, regionLocation=c2a32e16c274,35439,1732077105993 2024-11-20T04:32:01,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5df11b7e0731dc76c8b1dee4b367d32, UNASSIGN because future has completed 2024-11-20T04:32:01,087 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-20T04:32:01,088 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure f5df11b7e0731dc76c8b1dee4b367d32, server=c2a32e16c274,35439,1732077105993}] 2024-11-20T04:32:01,246 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:32:01,246 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-20T04:32:01,247 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing f5df11b7e0731dc76c8b1dee4b367d32, disabling compactions & flushes 2024-11-20T04:32:01,247 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1993): waiting for 0 compactions & cache flush to complete for region TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 2024-11-20T04:32:01,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:01,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:01,437 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/8e607ccf19454c75bce5c00547b983f4 2024-11-20T04:32:01,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/8e607ccf19454c75bce5c00547b983f4 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/8e607ccf19454c75bce5c00547b983f4 2024-11-20T04:32:01,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/8e607ccf19454c75bce5c00547b983f4, entries=14, sequenceid=99, filesize=19.6 K 2024-11-20T04:32:01,449 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=15.76 KB/16140 for f5df11b7e0731dc76c8b1dee4b367d32 in 428ms, sequenceid=99, compaction requested=false 2024-11-20T04:32:01,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5df11b7e0731dc76c8b1dee4b367d32: 2024-11-20T04:32:01,449 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 2024-11-20T04:32:01,449 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 2024-11-20T04:32:01,449 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. after waiting 0 ms 2024-11-20T04:32:01,449 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 
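
The entries above record the parent region f5df11b7e0731dc76c8b1dee4b367d32 being unassigned and closed under SplitTableRegionProcedure pid=7, with a final memstore flush before the split continues. As a minimal sketch (not this test's own code), the same kind of split can be driven from a client with only the public Admin API; the connection setup and polling interval below are illustrative assumptions, while the table name and split point row0062 come from this log:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to split at row0062, the boundary the daughter regions have in this log.
          admin.split(table, Bytes.toBytes("row0062"));
          // The split runs asynchronously as a master procedure; poll until two regions exist.
          List<RegionInfo> regions = admin.getRegions(table);
          while (regions.size() < 2) {
            Thread.sleep(500);
            regions = admin.getRegions(table);
          }
        }
      }
    }

Once the request is accepted, the master-side work proceeds as logged here: UNASSIGN and close of the parent, then reference and link files for the two daughters.
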
2024-11-20T04:32:01,449 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing f5df11b7e0731dc76c8b1dee4b367d32 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-20T04:32:01,453 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/fbb92fd1341145f3a76514ce3a37041d is 1080, key is row0080/info:/1732077121022/Put/seqid=0 2024-11-20T04:32:01,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741845_1021 (size=21141) 2024-11-20T04:32:01,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741845_1021 (size=21141) 2024-11-20T04:32:01,461 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/fbb92fd1341145f3a76514ce3a37041d 2024-11-20T04:32:01,467 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/.tmp/info/fbb92fd1341145f3a76514ce3a37041d as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/fbb92fd1341145f3a76514ce3a37041d 2024-11-20T04:32:01,472 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/fbb92fd1341145f3a76514ce3a37041d, entries=15, sequenceid=118, filesize=20.6 K 2024-11-20T04:32:01,473 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=0 B/0 for f5df11b7e0731dc76c8b1dee4b367d32 in 24ms, sequenceid=118, compaction requested=true 2024-11-20T04:32:01,474 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/9b86c2f446d64716a29b65d83b62fae6, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/89b7e4dfc5794e3bab9ea716e36d7e61, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/c1abcdf4165c4f4586d589ae6f7a1bde, 
hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/d63e5cab4c7d41d0963f70b4b2ca194e, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/2954386a2b16426c8d261f8020407ac0, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/569a0126e6ad43daa33789ac5f2cffd5] to archive 2024-11-20T04:32:01,475 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T04:32:01,476 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/9b86c2f446d64716a29b65d83b62fae6 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/9b86c2f446d64716a29b65d83b62fae6 2024-11-20T04:32:01,477 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/89b7e4dfc5794e3bab9ea716e36d7e61 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/89b7e4dfc5794e3bab9ea716e36d7e61 2024-11-20T04:32:01,479 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/c1abcdf4165c4f4586d589ae6f7a1bde to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/c1abcdf4165c4f4586d589ae6f7a1bde 2024-11-20T04:32:01,480 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/d63e5cab4c7d41d0963f70b4b2ca194e to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/d63e5cab4c7d41d0963f70b4b2ca194e 2024-11-20T04:32:01,481 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/2954386a2b16426c8d261f8020407ac0 to 
hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/2954386a2b16426c8d261f8020407ac0 2024-11-20T04:32:01,482 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/569a0126e6ad43daa33789ac5f2cffd5 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/569a0126e6ad43daa33789ac5f2cffd5 2024-11-20T04:32:01,487 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=1 2024-11-20T04:32:01,488 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 2024-11-20T04:32:01,488 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for f5df11b7e0731dc76c8b1dee4b367d32: Waiting for close lock at 1732077121247Running coprocessor pre-close hooks at 1732077121247Disabling compacts and flushes for region at 1732077121247Disabling writes for close at 1732077121449 (+202 ms)Obtaining lock to block concurrent updates at 1732077121449Preparing flush snapshotting stores in f5df11b7e0731dc76c8b1dee4b367d32 at 1732077121449Finished memstore snapshotting TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32., syncing WAL and waiting on mvcc, flushsize=dataSize=16140, getHeapSize=17520, getOffHeapSize=0, getCellsCount=15 at 1732077121450 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. 
at 1732077121450Flushing f5df11b7e0731dc76c8b1dee4b367d32/info: creating writer at 1732077121450Flushing f5df11b7e0731dc76c8b1dee4b367d32/info: appending metadata at 1732077121453 (+3 ms)Flushing f5df11b7e0731dc76c8b1dee4b367d32/info: closing flushed file at 1732077121453Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32bcd706: reopening flushed file at 1732077121466 (+13 ms)Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=0 B/0 for f5df11b7e0731dc76c8b1dee4b367d32 in 24ms, sequenceid=118, compaction requested=true at 1732077121473 (+7 ms)Writing region close event to WAL at 1732077121484 (+11 ms)Running coprocessor post-close hooks at 1732077121488 (+4 ms)Closed at 1732077121488 2024-11-20T04:32:01,490 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:32:01,491 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=f5df11b7e0731dc76c8b1dee4b367d32, regionState=CLOSED 2024-11-20T04:32:01,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure f5df11b7e0731dc76c8b1dee4b367d32, server=c2a32e16c274,35439,1732077105993 because future has completed 2024-11-20T04:32:01,495 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-20T04:32:01,495 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure f5df11b7e0731dc76c8b1dee4b367d32, server=c2a32e16c274,35439,1732077105993 in 406 msec 2024-11-20T04:32:01,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-20T04:32:01,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f5df11b7e0731dc76c8b1dee4b367d32, UNASSIGN in 414 msec 2024-11-20T04:32:01,505 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:01,508 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=f5df11b7e0731dc76c8b1dee4b367d32, threads=3 2024-11-20T04:32:01,510 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/fbb92fd1341145f3a76514ce3a37041d for region: f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:32:01,510 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/8e607ccf19454c75bce5c00547b983f4 for region: f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:32:01,510 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/6f6a8e9308f54fff8a77403e7ed15245 for region: f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:32:01,522 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/fbb92fd1341145f3a76514ce3a37041d, top=true 2024-11-20T04:32:01,522 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/8e607ccf19454c75bce5c00547b983f4, top=true 2024-11-20T04:32:01,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741846_1022 (size=27) 2024-11-20T04:32:01,528 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-fbb92fd1341145f3a76514ce3a37041d for child: 0817b3a9a087281bb368b13ba62230f1, parent: f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:32:01,528 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-8e607ccf19454c75bce5c00547b983f4 for child: 0817b3a9a087281bb368b13ba62230f1, parent: f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:32:01,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741846_1022 (size=27) 2024-11-20T04:32:01,528 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/fbb92fd1341145f3a76514ce3a37041d for region: f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:32:01,528 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/8e607ccf19454c75bce5c00547b983f4 for region: f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:32:01,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741847_1023 (size=27) 2024-11-20T04:32:01,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741847_1023 (size=27) 2024-11-20T04:32:01,538 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/6f6a8e9308f54fff8a77403e7ed15245 for region: f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:32:01,540 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region f5df11b7e0731dc76c8b1dee4b367d32 Daughter A: [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/066e06c5293d73afd0b554a99369f641/info/6f6a8e9308f54fff8a77403e7ed15245.f5df11b7e0731dc76c8b1dee4b367d32] storefiles, Daughter B: [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/6f6a8e9308f54fff8a77403e7ed15245.f5df11b7e0731dc76c8b1dee4b367d32, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-8e607ccf19454c75bce5c00547b983f4, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-fbb92fd1341145f3a76514ce3a37041d] storefiles. 2024-11-20T04:32:01,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741848_1024 (size=71) 2024-11-20T04:32:01,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741848_1024 (size=71) 2024-11-20T04:32:01,550 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741849_1025 (size=71) 2024-11-20T04:32:01,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741849_1025 (size=71) 2024-11-20T04:32:01,563 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:01,572 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/066e06c5293d73afd0b554a99369f641/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1 2024-11-20T04:32:01,574 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1 2024-11-20T04:32:01,576 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732077121576"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732077121576"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732077121576"}]},"ts":"1732077121576"} 2024-11-20T04:32:01,576 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732077121576"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732077121576"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732077121576"}]},"ts":"1732077121576"} 2024-11-20T04:32:01,576 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732077121576"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732077121576"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732077121576"}]},"ts":"1732077121576"} 2024-11-20T04:32:01,594 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=066e06c5293d73afd0b554a99369f641, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0817b3a9a087281bb368b13ba62230f1, ASSIGN}] 2024-11-20T04:32:01,595 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=066e06c5293d73afd0b554a99369f641, ASSIGN 2024-11-20T04:32:01,595 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0817b3a9a087281bb368b13ba62230f1, ASSIGN 2024-11-20T04:32:01,596 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=066e06c5293d73afd0b554a99369f641, ASSIGN; state=SPLITTING_NEW, location=c2a32e16c274,35439,1732077105993; forceNewPlan=false, retain=false 2024-11-20T04:32:01,596 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0817b3a9a087281bb368b13ba62230f1, ASSIGN; state=SPLITTING_NEW, location=c2a32e16c274,35439,1732077105993; forceNewPlan=false, retain=false 2024-11-20T04:32:01,746 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=0817b3a9a087281bb368b13ba62230f1, regionState=OPENING, regionLocation=c2a32e16c274,35439,1732077105993 2024-11-20T04:32:01,746 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta 
row=066e06c5293d73afd0b554a99369f641, regionState=OPENING, regionLocation=c2a32e16c274,35439,1732077105993 2024-11-20T04:32:01,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0817b3a9a087281bb368b13ba62230f1, ASSIGN because future has completed 2024-11-20T04:32:01,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0817b3a9a087281bb368b13ba62230f1, server=c2a32e16c274,35439,1732077105993}] 2024-11-20T04:32:01,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=066e06c5293d73afd0b554a99369f641, ASSIGN because future has completed 2024-11-20T04:32:01,750 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 066e06c5293d73afd0b554a99369f641, server=c2a32e16c274,35439,1732077105993}] 2024-11-20T04:32:01,905 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1. 2024-11-20T04:32:01,905 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 0817b3a9a087281bb368b13ba62230f1, NAME => 'TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-20T04:32:01,906 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,906 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:32:01,906 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,906 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,907 INFO [StoreOpener-0817b3a9a087281bb368b13ba62230f1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,908 INFO [StoreOpener-0817b3a9a087281bb368b13ba62230f1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0817b3a9a087281bb368b13ba62230f1 columnFamilyName info 2024-11-20T04:32:01,908 DEBUG [StoreOpener-0817b3a9a087281bb368b13ba62230f1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:01,918 DEBUG [StoreOpener-0817b3a9a087281bb368b13ba62230f1-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/6f6a8e9308f54fff8a77403e7ed15245.f5df11b7e0731dc76c8b1dee4b367d32->hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/6f6a8e9308f54fff8a77403e7ed15245-top 2024-11-20T04:32:01,922 DEBUG [StoreOpener-0817b3a9a087281bb368b13ba62230f1-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-8e607ccf19454c75bce5c00547b983f4 2024-11-20T04:32:01,926 DEBUG [StoreOpener-0817b3a9a087281bb368b13ba62230f1-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-fbb92fd1341145f3a76514ce3a37041d 2024-11-20T04:32:01,926 INFO [StoreOpener-0817b3a9a087281bb368b13ba62230f1-1 {}] regionserver.HStore(327): Store=0817b3a9a087281bb368b13ba62230f1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:32:01,926 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,927 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,928 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,929 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,929 DEBUG 
[RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,930 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,931 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 0817b3a9a087281bb368b13ba62230f1; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794438, jitterRate=0.010181054472923279}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T04:32:01,931 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:01,931 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 0817b3a9a087281bb368b13ba62230f1: Running coprocessor pre-open hook at 1732077121906Writing region info on filesystem at 1732077121906Initializing all the Stores at 1732077121907 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077121907Cleaning up temporary data from old regions at 1732077121929 (+22 ms)Running coprocessor post-open hooks at 1732077121931 (+2 ms)Region opened successfully at 1732077121931 2024-11-20T04:32:01,932 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1., pid=12, masterSystemTime=1732077121901 2024-11-20T04:32:01,933 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 0817b3a9a087281bb368b13ba62230f1:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T04:32:01,933 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:32:01,933 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T04:32:01,934 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1. 
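
The daughter store opened above was populated with a "...-top" reference to the parent HFile plus link files whose names follow the pattern table=parent-encoded-region-hfile, as created by HRegionFileSystem during the split step earlier in this log. A small, self-contained illustration of parsing that naming convention; the regular expression is inferred from the file names printed here, not taken from an HBase API:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class HFileLinkNameSketch {
      // Pattern inferred from names such as
      // TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-8e607ccf19454c75bce5c00547b983f4
      private static final Pattern LINK_NAME =
          Pattern.compile("^(?<table>.+)=(?<parent>[0-9a-f]{32})-(?<hfile>[0-9a-f]{32})$");

      public static void main(String[] args) {
        String name = "TestLogRolling-testLogRolling"
            + "=f5df11b7e0731dc76c8b1dee4b367d32-8e607ccf19454c75bce5c00547b983f4";
        Matcher m = LINK_NAME.matcher(name);
        if (m.matches()) {
          System.out.println("table  : " + m.group("table"));   // source table
          System.out.println("parent : " + m.group("parent"));  // parent region's encoded name
          System.out.println("hfile  : " + m.group("hfile"));   // referenced store file
        }
      }
    }
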
2024-11-20T04:32:01,934 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1541): 0817b3a9a087281bb368b13ba62230f1/info is initiating minor compaction (all files) 2024-11-20T04:32:01,934 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0817b3a9a087281bb368b13ba62230f1/info in TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1. 2024-11-20T04:32:01,934 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/6f6a8e9308f54fff8a77403e7ed15245.f5df11b7e0731dc76c8b1dee4b367d32->hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/6f6a8e9308f54fff8a77403e7ed15245-top, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-8e607ccf19454c75bce5c00547b983f4, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-fbb92fd1341145f3a76514ce3a37041d] into tmpdir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp, totalSize=113.9 K 2024-11-20T04:32:01,935 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1. 2024-11-20T04:32:01,935 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6f6a8e9308f54fff8a77403e7ed15245.f5df11b7e0731dc76c8b1dee4b367d32, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1732077116887 2024-11-20T04:32:01,935 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1. 2024-11-20T04:32:01,935 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641. 
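
The compaction selected above for the first daughter (0817b3a9a087281bb368b13ba62230f1) was triggered automatically on region open because the store holds reference and link files. For comparison, a hedged sketch of requesting a compaction and watching its state from a client via the public Admin API; the poll loop is a simplification and the request is asynchronous:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionStateSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.majorCompact(table);              // request only; the region servers run it asynchronously
          CompactionState state = admin.getCompactionState(table);
          while (state != CompactionState.NONE) { // poll until no compaction is reported as running
            Thread.sleep(500);
            state = admin.getCompactionState(table);
          }
        }
      }
    }
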
2024-11-20T04:32:01,935 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 066e06c5293d73afd0b554a99369f641, NAME => 'TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-20T04:32:01,935 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-8e607ccf19454c75bce5c00547b983f4, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1732077120999 2024-11-20T04:32:01,935 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 066e06c5293d73afd0b554a99369f641 2024-11-20T04:32:01,935 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:32:01,935 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 066e06c5293d73afd0b554a99369f641 2024-11-20T04:32:01,935 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 066e06c5293d73afd0b554a99369f641 2024-11-20T04:32:01,936 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=0817b3a9a087281bb368b13ba62230f1, regionState=OPEN, openSeqNum=122, regionLocation=c2a32e16c274,35439,1732077105993 2024-11-20T04:32:01,936 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-fbb92fd1341145f3a76514ce3a37041d, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732077121022 2024-11-20T04:32:01,937 INFO [StoreOpener-066e06c5293d73afd0b554a99369f641-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 066e06c5293d73afd0b554a99369f641 2024-11-20T04:32:01,937 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-20T04:32:01,938 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
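
Interleaved with the daughter opens, hbase:meta (region 1588230740) is being flushed because the split wrote new regioninfo, splitA/splitB and state cells; the log shows 4/4 column families being flushed by the server-side flush policy. For completeness, a hedged sketch of how a flush of hbase:meta can be requested from a client; this is not what triggered the flush seen here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MetaFlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flush all column families of hbase:meta (the log above shows 4/4 being flushed).
          admin.flush(TableName.META_TABLE_NAME);
        }
      }
    }
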
2024-11-20T04:32:01,938 INFO [StoreOpener-066e06c5293d73afd0b554a99369f641-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066e06c5293d73afd0b554a99369f641 columnFamilyName info 2024-11-20T04:32:01,938 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-20T04:32:01,938 DEBUG [StoreOpener-066e06c5293d73afd0b554a99369f641-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:01,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0817b3a9a087281bb368b13ba62230f1, server=c2a32e16c274,35439,1732077105993 because future has completed 2024-11-20T04:32:01,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-20T04:32:01,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 0817b3a9a087281bb368b13ba62230f1, server=c2a32e16c274,35439,1732077105993 in 190 msec 2024-11-20T04:32:01,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0817b3a9a087281bb368b13ba62230f1, ASSIGN in 349 msec 2024-11-20T04:32:01,949 DEBUG [StoreOpener-066e06c5293d73afd0b554a99369f641-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/066e06c5293d73afd0b554a99369f641/info/6f6a8e9308f54fff8a77403e7ed15245.f5df11b7e0731dc76c8b1dee4b367d32->hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/6f6a8e9308f54fff8a77403e7ed15245-bottom 2024-11-20T04:32:01,950 INFO [StoreOpener-066e06c5293d73afd0b554a99369f641-1 {}] regionserver.HStore(327): Store=066e06c5293d73afd0b554a99369f641/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:32:01,950 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 066e06c5293d73afd0b554a99369f641 2024-11-20T04:32:01,951 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/066e06c5293d73afd0b554a99369f641 2024-11-20T04:32:01,952 
DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/066e06c5293d73afd0b554a99369f641 2024-11-20T04:32:01,952 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 066e06c5293d73afd0b554a99369f641 2024-11-20T04:32:01,952 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 066e06c5293d73afd0b554a99369f641 2024-11-20T04:32:01,954 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 066e06c5293d73afd0b554a99369f641 2024-11-20T04:32:01,955 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 066e06c5293d73afd0b554a99369f641; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766301, jitterRate=-0.02559855580329895}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T04:32:01,955 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 066e06c5293d73afd0b554a99369f641 2024-11-20T04:32:01,955 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 066e06c5293d73afd0b554a99369f641: Running coprocessor pre-open hook at 1732077121936Writing region info on filesystem at 1732077121936Initializing all the Stores at 1732077121936Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077121936Cleaning up temporary data from old regions at 1732077121952 (+16 ms)Running coprocessor post-open hooks at 1732077121955 (+3 ms)Region opened successfully at 1732077121955 2024-11-20T04:32:01,956 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641., pid=13, masterSystemTime=1732077121901 2024-11-20T04:32:01,956 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 066e06c5293d73afd0b554a99369f641:info, priority=-2147483648, current under compaction store size is 2 2024-11-20T04:32:01,957 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:32:01,957 DEBUG [RS:0;c2a32e16c274:35439-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-20T04:32:01,957 INFO 
[RS:0;c2a32e16c274:35439-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641. 2024-11-20T04:32:01,957 DEBUG [RS:0;c2a32e16c274:35439-longCompactions-0 {}] regionserver.HStore(1541): 066e06c5293d73afd0b554a99369f641/info is initiating minor compaction (all files) 2024-11-20T04:32:01,957 INFO [RS:0;c2a32e16c274:35439-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 066e06c5293d73afd0b554a99369f641/info in TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641. 2024-11-20T04:32:01,958 INFO [RS:0;c2a32e16c274:35439-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/066e06c5293d73afd0b554a99369f641/info/6f6a8e9308f54fff8a77403e7ed15245.f5df11b7e0731dc76c8b1dee4b367d32->hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/6f6a8e9308f54fff8a77403e7ed15245-bottom] into tmpdir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/066e06c5293d73afd0b554a99369f641/.tmp, totalSize=73.6 K 2024-11-20T04:32:01,958 DEBUG [RS:0;c2a32e16c274:35439-longCompactions-0 {}] compactions.Compactor(225): Compacting 6f6a8e9308f54fff8a77403e7ed15245.f5df11b7e0731dc76c8b1dee4b367d32, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732077116887 2024-11-20T04:32:01,959 DEBUG [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641. 2024-11-20T04:32:01,959 INFO [RS_OPEN_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641. 2024-11-20T04:32:01,960 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=066e06c5293d73afd0b554a99369f641, regionState=OPEN, openSeqNum=122, regionLocation=c2a32e16c274,35439,1732077105993 2024-11-20T04:32:01,962 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 066e06c5293d73afd0b554a99369f641, server=c2a32e16c274,35439,1732077105993 because future has completed 2024-11-20T04:32:01,964 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0817b3a9a087281bb368b13ba62230f1#info#compaction#64 average throughput is 33.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T04:32:01,965 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/c34df07d262d4af7b5d62afa67a3f1d8 is 1080, key is row0062/info:/1732077118991/Put/seqid=0 2024-11-20T04:32:01,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/.tmp/info/dfdfdac2c8c3421db0c56f106b67ec37 is 193, key is TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1./info:regioninfo/1732077121935/Put/seqid=0 2024-11-20T04:32:01,967 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-20T04:32:01,967 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 066e06c5293d73afd0b554a99369f641, server=c2a32e16c274,35439,1732077105993 in 214 msec 2024-11-20T04:32:01,970 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-20T04:32:01,970 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=066e06c5293d73afd0b554a99369f641, ASSIGN in 374 msec 2024-11-20T04:32:01,978 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=f5df11b7e0731dc76c8b1dee4b367d32, daughterA=066e06c5293d73afd0b554a99369f641, daughterB=0817b3a9a087281bb368b13ba62230f1 in 903 msec 2024-11-20T04:32:01,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741850_1026 (size=40830) 2024-11-20T04:32:01,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741850_1026 (size=40830) 2024-11-20T04:32:01,980 INFO [RS:0;c2a32e16c274:35439-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 066e06c5293d73afd0b554a99369f641#info#compaction#66 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T04:32:01,981 DEBUG [RS:0;c2a32e16c274:35439-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/066e06c5293d73afd0b554a99369f641/.tmp/info/4f2f90dc51e64b4584476cb3bb0adc9b is 1080, key is row0001/info:/1732077116887/Put/seqid=0 2024-11-20T04:32:01,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741851_1027 (size=9882) 2024-11-20T04:32:01,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741851_1027 (size=9882) 2024-11-20T04:32:01,985 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/.tmp/info/dfdfdac2c8c3421db0c56f106b67ec37 2024-11-20T04:32:01,986 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/c34df07d262d4af7b5d62afa67a3f1d8 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/c34df07d262d4af7b5d62afa67a3f1d8 2024-11-20T04:32:01,992 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0817b3a9a087281bb368b13ba62230f1/info of 0817b3a9a087281bb368b13ba62230f1 into c34df07d262d4af7b5d62afa67a3f1d8(size=39.9 K), total size for store is 39.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
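
The compaction just completed replaced the three reference/link files of daughter 0817b3a9a087281bb368b13ba62230f1 with a single store file, c34df07d262d4af7b5d62afa67a3f1d8 (39.9 K). A minimal sketch of inspecting the resulting store directory with the Hadoop FileSystem API; the namenode address and path layout are mirrored from the paths printed in this log and would need adjusting elsewhere:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class StoreFileListingSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:36491");  // namenode from this log; adjust as needed
        // Layout mirrored from the paths above: <test-data root>/data/default/<table>/<region>/info
        Path infoDir = new Path("/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6"
            + "/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info");
        try (FileSystem fs = FileSystem.get(conf)) {
          for (FileStatus f : fs.listStatus(infoDir)) {
            System.out.println(f.getPath().getName() + "  " + f.getLen() + " bytes");
          }
        }
      }
    }
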
2024-11-20T04:32:01,992 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0817b3a9a087281bb368b13ba62230f1: 2024-11-20T04:32:01,992 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1., storeName=0817b3a9a087281bb368b13ba62230f1/info, priority=13, startTime=1732077121932; duration=0sec 2024-11-20T04:32:01,992 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:32:01,993 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0817b3a9a087281bb368b13ba62230f1:info 2024-11-20T04:32:01,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741852_1028 (size=70862) 2024-11-20T04:32:01,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741852_1028 (size=70862) 2024-11-20T04:32:02,000 DEBUG [RS:0;c2a32e16c274:35439-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/066e06c5293d73afd0b554a99369f641/.tmp/info/4f2f90dc51e64b4584476cb3bb0adc9b as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/066e06c5293d73afd0b554a99369f641/info/4f2f90dc51e64b4584476cb3bb0adc9b 2024-11-20T04:32:02,006 INFO [RS:0;c2a32e16c274:35439-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 066e06c5293d73afd0b554a99369f641/info of 066e06c5293d73afd0b554a99369f641 into 4f2f90dc51e64b4584476cb3bb0adc9b(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T04:32:02,007 DEBUG [RS:0;c2a32e16c274:35439-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 066e06c5293d73afd0b554a99369f641: 2024-11-20T04:32:02,007 INFO [RS:0;c2a32e16c274:35439-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641., storeName=066e06c5293d73afd0b554a99369f641/info, priority=15, startTime=1732077121956; duration=0sec 2024-11-20T04:32:02,007 DEBUG [RS:0;c2a32e16c274:35439-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:32:02,007 DEBUG [RS:0;c2a32e16c274:35439-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066e06c5293d73afd0b554a99369f641:info 2024-11-20T04:32:02,007 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/.tmp/ns/798c496a065b4e9eae4e275d58173c66 is 43, key is default/ns:d/1732077106792/Put/seqid=0 2024-11-20T04:32:02,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741853_1029 (size=5153) 2024-11-20T04:32:02,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741853_1029 (size=5153) 2024-11-20T04:32:02,012 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/.tmp/ns/798c496a065b4e9eae4e275d58173c66 2024-11-20T04:32:02,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/.tmp/table/5f753ae95cf74bd2a7e7faefb66c98d7 is 65, key is TestLogRolling-testLogRolling/table:state/1732077107180/Put/seqid=0 2024-11-20T04:32:02,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741854_1030 (size=5340) 2024-11-20T04:32:02,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741854_1030 (size=5340) 2024-11-20T04:32:02,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/.tmp/table/5f753ae95cf74bd2a7e7faefb66c98d7 2024-11-20T04:32:02,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/.tmp/info/dfdfdac2c8c3421db0c56f106b67ec37 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/info/dfdfdac2c8c3421db0c56f106b67ec37 2024-11-20T04:32:02,047 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/info/dfdfdac2c8c3421db0c56f106b67ec37, 
entries=30, sequenceid=17, filesize=9.7 K 2024-11-20T04:32:02,048 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/.tmp/ns/798c496a065b4e9eae4e275d58173c66 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/ns/798c496a065b4e9eae4e275d58173c66 2024-11-20T04:32:02,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/ns/798c496a065b4e9eae4e275d58173c66, entries=2, sequenceid=17, filesize=5.0 K 2024-11-20T04:32:02,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/.tmp/table/5f753ae95cf74bd2a7e7faefb66c98d7 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/table/5f753ae95cf74bd2a7e7faefb66c98d7 2024-11-20T04:32:02,059 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/table/5f753ae95cf74bd2a7e7faefb66c98d7, entries=2, sequenceid=17, filesize=5.2 K 2024-11-20T04:32:02,060 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 122ms, sequenceid=17, compaction requested=false 2024-11-20T04:32:02,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-20T04:32:02,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:02,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:03,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:03,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:04,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:04,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:05,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:05,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:06,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:06,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T04:32:06,991 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-20T04:32:06,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:06,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:06,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:06,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:06,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:06,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:06,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:06,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:07,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:07,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:07,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:07,014 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:07,014 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:07,014 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:07,017 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:07,017 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:07,018 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:07,020 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-20T04:32:07,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-20T04:32:07,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:08,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:08,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:32:09,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:09,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:10,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-20T04:32:10,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T04:32:11,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:52690 deadline: 1732077141131, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. is not online on c2a32e16c274,35439,1732077105993
2024-11-20T04:32:11,133 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32., hostname=c2a32e16c274,35439,1732077105993, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32., hostname=c2a32e16c274,35439,1732077105993, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. is not online on c2a32e16c274,35439,1732077105993
    at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-20T04:32:11,133 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32., hostname=c2a32e16c274,35439,1732077105993, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32. is not online on c2a32e16c274,35439,1732077105993
    at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-20T04:32:11,133 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732077106819.f5df11b7e0731dc76c8b1dee4b367d32., hostname=c2a32e16c274,35439,1732077105993, seqNum=2 from cache
2024-11-20T04:32:11,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
2024-11-20T04:32:11,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:12,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:12,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:13,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:13,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:14,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
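The RecoverLeaseFSUtils WARN entries above repeat roughly once per second for each of the two WAL files: recoverDFSFileLease keeps polling isFileClosed, and every poll fails because the DFSClient behind the test filesystem has already been closed. The poll goes through java.lang.reflect.Method.invoke, presumably because isFileClosed is not part of the generic FileSystem API, which is why the real java.io.IOException: Filesystem closed surfaces wrapped in an InvocationTargetException. Below is a rough sketch of that poll-and-unwrap pattern; the reflective lookup of isFileClosed(Path) matches what the stack trace shows, but the loop structure, timing, and messages are illustrative rather than the exact HBase implementation.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class LeaseRecoveryRetrySketch {

    // Polls fs.isFileClosed(wal) reflectively until it returns true or attempts run out.
    static boolean waitUntilFileClosed(FileSystem fs, Path wal, int maxAttempts)
            throws InterruptedException {
        Method isFileClosed;
        try {
            // isFileClosed is not declared on the base FileSystem class, so it is
            // looked up on the concrete implementation (DistributedFileSystem has it).
            isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
        } catch (NoSuchMethodException e) {
            return false; // this FileSystem cannot answer; the caller has to fall back
        }
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            try {
                if ((Boolean) isFileClosed.invoke(fs, wal)) {
                    return true; // the lease was released and the file is closed
                }
            } catch (InvocationTargetException e) {
                // Method.invoke wraps whatever the target threw; in the log above the
                // wrapped cause is "java.io.IOException: Filesystem closed".
                Throwable cause = e.getCause();
                System.err.println("Failed invocation for " + wal + ": " + cause);
                if (cause instanceof IOException && "Filesystem closed".equals(cause.getMessage())) {
                    System.err.println("DFSClient already closed; further polls cannot succeed");
                }
            } catch (IllegalAccessException e) {
                return false;
            }
            Thread.sleep(1000L); // roughly matches the one-second spacing of the WARN lines
        }
        return false;
    }
}

In the capture above a loop like this can never succeed, because the underlying DFSClient has already been closed, so every poll ends in the same wrapped IOException.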
2024-11-20T04:32:14,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:15,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:15,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:15,935 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
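The FsDatasetAsyncDiskServiceFixer entry above records a different reflection failure: the test helper reaches for a private threadGroup field that newer Hadoop releases no longer expose (HBASE-27595), logs the NoSuchFieldException at DEBUG, and carries on. A small sketch of that guarded field access follows; the target object and field name are placeholders, not the actual Hadoop class the fixer touches.

import java.lang.reflect.Field;

public final class ReflectiveFieldAccessSketch {

    // Returns the value of a private field, or null if the field does not exist
    // in the version of the class found on the classpath.
    static Object readPrivateField(Object target, String fieldName) {
        try {
            Field f = target.getClass().getDeclaredField(fieldName);
            f.setAccessible(true);
            return f.get(target);
        } catch (NoSuchFieldException e) {
            // The field was removed or renamed in a newer release; log and degrade
            // gracefully, which is what the DEBUG line above does for "threadGroup".
            System.out.println("NoSuchFieldException: " + fieldName
                + "; the target class has likely changed between library versions");
            return null;
        } catch (IllegalAccessException e) {
            return null;
        }
    }

    public static void main(String[] args) {
        // String has no "threadGroup" field, so this exercises the fallback path.
        System.out.println("value = " + readPrivateField("placeholder target", "threadGroup"));
    }
}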
2024-11-20T04:32:16,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:16,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:17,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:17,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:18,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:18,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:19,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:19,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:20,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:20,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:21,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:21,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:22,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:22,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:23,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:23,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:24,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:24,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null (caused by java.io.IOException: Filesystem closed; stack trace identical to the 04:32:10,431 entry above)
2024-11-20T04:32:25,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:25,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:26,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:26,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:27,386 INFO [master/c2a32e16c274:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T04:32:27,386 INFO [master/c2a32e16c274:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-20T04:32:27,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:27,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:28,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:28,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:29,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:29,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:30,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:30,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:31,327 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0095', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1., hostname=c2a32e16c274,35439,1732077105993, seqNum=122] 2024-11-20T04:32:31,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:31,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T04:32:31,761 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-20T04:32:32,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:32,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:33,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:33,340 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0817b3a9a087281bb368b13ba62230f1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T04:32:33,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/01bb1cdd043c43a0964560d2f42eb9c2 is 1080, key is row0095/info:/1732077151328/Put/seqid=0 2024-11-20T04:32:33,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741855_1031 (size=12513) 2024-11-20T04:32:33,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741855_1031 (size=12513) 2024-11-20T04:32:33,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/01bb1cdd043c43a0964560d2f42eb9c2 2024-11-20T04:32:33,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/01bb1cdd043c43a0964560d2f42eb9c2 as 
hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/01bb1cdd043c43a0964560d2f42eb9c2 2024-11-20T04:32:33,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/01bb1cdd043c43a0964560d2f42eb9c2, entries=7, sequenceid=132, filesize=12.2 K 2024-11-20T04:32:33,363 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 0817b3a9a087281bb368b13ba62230f1 in 24ms, sequenceid=132, compaction requested=false 2024-11-20T04:32:33,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0817b3a9a087281bb368b13ba62230f1: 2024-11-20T04:32:33,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:33,365 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0817b3a9a087281bb368b13ba62230f1 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-20T04:32:33,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/9ee5d887ebd54b12a74246e801037f4f is 1080, key is row0102/info:/1732077153340/Put/seqid=0 2024-11-20T04:32:33,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741856_1032 (size=21156) 2024-11-20T04:32:33,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741856_1032 (size=21156) 2024-11-20T04:32:33,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=150 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/9ee5d887ebd54b12a74246e801037f4f 2024-11-20T04:32:33,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/9ee5d887ebd54b12a74246e801037f4f as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/9ee5d887ebd54b12a74246e801037f4f 2024-11-20T04:32:33,384 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/9ee5d887ebd54b12a74246e801037f4f, entries=15, sequenceid=150, filesize=20.7 K 2024-11-20T04:32:33,385 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for 0817b3a9a087281bb368b13ba62230f1 in 20ms, sequenceid=150, compaction requested=true 2024-11-20T04:32:33,385 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 0817b3a9a087281bb368b13ba62230f1: 2024-11-20T04:32:33,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0817b3a9a087281bb368b13ba62230f1:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T04:32:33,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:32:33,385 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T04:32:33,386 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74499 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T04:32:33,386 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1541): 0817b3a9a087281bb368b13ba62230f1/info is initiating minor compaction (all files) 2024-11-20T04:32:33,387 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0817b3a9a087281bb368b13ba62230f1/info in TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1. 2024-11-20T04:32:33,387 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/c34df07d262d4af7b5d62afa67a3f1d8, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/01bb1cdd043c43a0964560d2f42eb9c2, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/9ee5d887ebd54b12a74246e801037f4f] into tmpdir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp, totalSize=72.8 K 2024-11-20T04:32:33,387 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting c34df07d262d4af7b5d62afa67a3f1d8, keycount=33, bloomtype=ROW, size=39.9 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732077118991 2024-11-20T04:32:33,387 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 01bb1cdd043c43a0964560d2f42eb9c2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732077151328 2024-11-20T04:32:33,388 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9ee5d887ebd54b12a74246e801037f4f, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=150, earliestPutTs=1732077153340 2024-11-20T04:32:33,400 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0817b3a9a087281bb368b13ba62230f1#info#compaction#71 average throughput is 56.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T04:32:33,400 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/0c53e18bd94a40158e23aec230363c06 is 1080, key is row0062/info:/1732077118991/Put/seqid=0 2024-11-20T04:32:33,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741857_1033 (size=64713) 2024-11-20T04:32:33,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741857_1033 (size=64713) 2024-11-20T04:32:33,411 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/0c53e18bd94a40158e23aec230363c06 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/0c53e18bd94a40158e23aec230363c06 2024-11-20T04:32:33,417 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0817b3a9a087281bb368b13ba62230f1/info of 0817b3a9a087281bb368b13ba62230f1 into 0c53e18bd94a40158e23aec230363c06(size=63.2 K), total size for store is 63.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T04:32:33,417 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0817b3a9a087281bb368b13ba62230f1: 2024-11-20T04:32:33,417 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1., storeName=0817b3a9a087281bb368b13ba62230f1/info, priority=13, startTime=1732077153385; duration=0sec 2024-11-20T04:32:33,417 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:32:33,417 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0817b3a9a087281bb368b13ba62230f1:info 2024-11-20T04:32:33,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:33,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:34,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:34,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:35,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:35,386 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0817b3a9a087281bb368b13ba62230f1 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-20T04:32:35,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/0438a34d3ab140718edd395bc321a218 is 1080, key is row0117/info:/1732077153366/Put/seqid=0 2024-11-20T04:32:35,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741858_1034 (size=19000) 2024-11-20T04:32:35,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741858_1034 (size=19000) 2024-11-20T04:32:35,398 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/0438a34d3ab140718edd395bc321a218 2024-11-20T04:32:35,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/0438a34d3ab140718edd395bc321a218 as 
hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/0438a34d3ab140718edd395bc321a218 2024-11-20T04:32:35,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/0438a34d3ab140718edd395bc321a218, entries=13, sequenceid=167, filesize=18.6 K 2024-11-20T04:32:35,410 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=15.76 KB/16140 for 0817b3a9a087281bb368b13ba62230f1 in 23ms, sequenceid=167, compaction requested=false 2024-11-20T04:32:35,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0817b3a9a087281bb368b13ba62230f1: 2024-11-20T04:32:35,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:35,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0817b3a9a087281bb368b13ba62230f1 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-20T04:32:35,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/7327aa1b7b6a49389c31e66882cf7196 is 1080, key is row0130/info:/1732077155387/Put/seqid=0 2024-11-20T04:32:35,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741859_1035 (size=22238) 2024-11-20T04:32:35,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741859_1035 (size=22238) 2024-11-20T04:32:35,421 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=186 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/7327aa1b7b6a49389c31e66882cf7196 2024-11-20T04:32:35,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/7327aa1b7b6a49389c31e66882cf7196 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/7327aa1b7b6a49389c31e66882cf7196 2024-11-20T04:32:35,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0817b3a9a087281bb368b13ba62230f1, server=c2a32e16c274,35439,1732077105993 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-20T04:32:35,433 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/7327aa1b7b6a49389c31e66882cf7196, entries=16, sequenceid=186, filesize=21.7 K 2024-11-20T04:32:35,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:52690 deadline: 1732077165433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0817b3a9a087281bb368b13ba62230f1, server=c2a32e16c274,35439,1732077105993 2024-11-20T04:32:35,434 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1., hostname=c2a32e16c274,35439,1732077105993, seqNum=122 , the old value is region=TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1., hostname=c2a32e16c274,35439,1732077105993, seqNum=122, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0817b3a9a087281bb368b13ba62230f1, server=c2a32e16c274,35439,1732077105993 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-20T04:32:35,434 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1., hostname=c2a32e16c274,35439,1732077105993, 
seqNum=122 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0817b3a9a087281bb368b13ba62230f1, server=c2a32e16c274,35439,1732077105993 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-20T04:32:35,434 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1., hostname=c2a32e16c274,35439,1732077105993, seqNum=122 because the exception is null or not the one we care about 2024-11-20T04:32:35,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for 0817b3a9a087281bb368b13ba62230f1 in 23ms, sequenceid=186, compaction requested=true 2024-11-20T04:32:35,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0817b3a9a087281bb368b13ba62230f1: 2024-11-20T04:32:35,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0817b3a9a087281bb368b13ba62230f1:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T04:32:35,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:32:35,435 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T04:32:35,435 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 105951 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T04:32:35,436 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1541): 0817b3a9a087281bb368b13ba62230f1/info is initiating minor compaction (all files) 2024-11-20T04:32:35,436 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0817b3a9a087281bb368b13ba62230f1/info in TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1. 
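The RegionTooBusyException entries above show handler 2 rejecting a Mutate because the region is over its 32.0 K memstore limit while a flush is still in flight, and the async client keeping its cached location for the region and simply retrying until the call's deadline (1732077165433). The fragment below is a minimal, hypothetical sketch of that kind of deadline-bounded retry with backoff; RegionBusyException, doPut and the timing values are invented for illustration and this is not the actual HBase client API.

import java.util.concurrent.Callable;
import java.util.concurrent.ThreadLocalRandom;

// Minimal, hypothetical sketch of client-side retry for a "region too busy"
// style rejection like the one logged above. RegionBusyException, doPut and
// the timing values are invented stand-ins, not the HBase client API.
public class BusyRetrySketch {

    static class RegionBusyException extends Exception {
        RegionBusyException(String msg) { super(msg); }
    }

    // Retry the write with capped exponential backoff until it succeeds or the
    // operation deadline (an absolute epoch-millis value) would be exceeded.
    static void putWithRetry(Callable<Void> doPut, long deadlineMillis) throws Exception {
        long backoff = 100;
        while (true) {
            try {
                doPut.call();
                return;                                        // write accepted
            } catch (RegionBusyException e) {
                long now = System.currentTimeMillis();
                if (now + backoff >= deadlineMillis) {
                    throw e;                                   // out of time: surface the error
                }
                Thread.sleep(backoff + ThreadLocalRandom.current().nextLong(50));
                backoff = Math.min(backoff * 2, 2_000);        // cap the backoff at 2 seconds
            }
        }
    }
}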
2024-11-20T04:32:35,436 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/0c53e18bd94a40158e23aec230363c06, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/0438a34d3ab140718edd395bc321a218, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/7327aa1b7b6a49389c31e66882cf7196] into tmpdir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp, totalSize=103.5 K 2024-11-20T04:32:35,436 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0c53e18bd94a40158e23aec230363c06, keycount=55, bloomtype=ROW, size=63.2 K, encoding=NONE, compression=NONE, seqNum=150, earliestPutTs=1732077118991 2024-11-20T04:32:35,436 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0438a34d3ab140718edd395bc321a218, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732077153366 2024-11-20T04:32:35,437 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7327aa1b7b6a49389c31e66882cf7196, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1732077155387 2024-11-20T04:32:35,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:35,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:35,446 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0817b3a9a087281bb368b13ba62230f1#info#compaction#74 average throughput is 86.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T04:32:35,447 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/b3b2ffc7ce7848339be654a73bdea43c is 1080, key is row0062/info:/1732077118991/Put/seqid=0 2024-11-20T04:32:35,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741860_1036 (size=96154) 2024-11-20T04:32:35,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741860_1036 (size=96154) 2024-11-20T04:32:35,456 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/b3b2ffc7ce7848339be654a73bdea43c as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/b3b2ffc7ce7848339be654a73bdea43c 2024-11-20T04:32:35,461 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0817b3a9a087281bb368b13ba62230f1/info of 0817b3a9a087281bb368b13ba62230f1 into b3b2ffc7ce7848339be654a73bdea43c(size=93.9 K), total size for store is 93.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T04:32:35,461 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0817b3a9a087281bb368b13ba62230f1: 2024-11-20T04:32:35,461 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1., storeName=0817b3a9a087281bb368b13ba62230f1/info, priority=13, startTime=1732077155434; duration=0sec 2024-11-20T04:32:35,461 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:32:35,461 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0817b3a9a087281bb368b13ba62230f1:info 2024-11-20T04:32:36,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:36,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:37,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:37,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:38,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:38,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:39,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:39,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:40,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:40,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:41,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T04:32:41,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T04:32:42,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta
2024-11-20T04:32:42,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
2024-11-20T04:32:43,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta
2024-11-20T04:32:43,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
2024-11-20T04:32:44,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta
2024-11-20T04:32:44,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
2024-11-20T04:32:45,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta
2024-11-20T04:32:45,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
2024-11-20T04:32:45,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on 0817b3a9a087281bb368b13ba62230f1
2024-11-20T04:32:45,512 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0817b3a9a087281bb368b13ba62230f1 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-20T04:32:45,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/7bcef4cbec1641d497d565b4242d9d8c is 1080, key is row0146/info:/1732077155412/Put/seqid=0
2024-11-20T04:32:45,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741861_1037 (size=20078)
2024-11-20T04:32:45,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741861_1037 (size=20078)
2024-11-20T04:32:45,522 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/7bcef4cbec1641d497d565b4242d9d8c
2024-11-20T04:32:45,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/7bcef4cbec1641d497d565b4242d9d8c as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/7bcef4cbec1641d497d565b4242d9d8c
2024-11-20T04:32:45,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/7bcef4cbec1641d497d565b4242d9d8c, entries=14, sequenceid=204, filesize=19.6 K
2024-11-20T04:32:45,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=1.05 KB/1076 for 0817b3a9a087281bb368b13ba62230f1 in 20ms, sequenceid=204, compaction requested=false
2024-11-20T04:32:45,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0817b3a9a087281bb368b13ba62230f1:
2024-11-20T04:32:45,935 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
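The repeated WARN entries above come from a reflective probe: as the stack trace shows, RecoverLeaseFSUtils calls isFileClosed through java.lang.reflect, so the DFSClient's "Filesystem closed" IOException surfaces wrapped in an InvocationTargetException. Below is a minimal sketch of that probe pattern, assuming only hadoop-common on the classpath; the class name IsFileClosedProbe is illustrative and this is not the HBase source.

// Sketch only: probe isFileClosed reflectively and unwrap the failure mode seen in the log.
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  // Returns true only when the filesystem positively reports the file as closed.
  static boolean probe(FileSystem fs, Path path) {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // filesystem does not expose isFileClosed (not an HDFS client)
    } catch (IllegalAccessException | InvocationTargetException e) {
      // A closed client shows up here as InvocationTargetException caused by
      // java.io.IOException: Filesystem closed, matching the WARN entries above.
      return false;
    }
  }
}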
2024-11-20T04:32:46,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
2024-11-20T04:32:46,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta
2024-11-20T04:32:46,906 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0817b3a9a087281bb368b13ba62230f1, had cached 0 bytes from a total of 116232
2024-11-20T04:32:46,935 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 066e06c5293d73afd0b554a99369f641, had cached 0 bytes from a total of 70862
2024-11-20T04:32:47,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta
2024-11-20T04:32:47,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
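The root cause logged with each retry, "Caused by: java.io.IOException: Filesystem closed" thrown from DFSClient.checkOpen, is the usual closed-client guard: once the client has been closed, every later call fails the same way, which is why the identical exception repeats on every attempt. The sketch below is a generic illustration of that guard pattern, not the DFSClient source.

// Illustrative closed-client guard; ClosableClient and its methods are hypothetical.
import java.io.IOException;

final class ClosableClient {
  private volatile boolean clientRunning = true;

  void close() { clientRunning = false; }

  private void checkOpen() throws IOException {
    if (!clientRunning) {
      throw new IOException("Filesystem closed"); // the message seen in the traces above
    }
  }

  boolean isFileClosed(String path) throws IOException {
    checkOpen();   // every public operation re-checks the flag first
    return true;   // placeholder; a real client would ask the NameNode
  }
}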
2024-11-20T04:32:47,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on 0817b3a9a087281bb368b13ba62230f1
2024-11-20T04:32:47,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0817b3a9a087281bb368b13ba62230f1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-20T04:32:47,527 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/f91cfad24f04411e9e691fa6e97c9d2d is 1080, key is row0160/info:/1732077165513/Put/seqid=0
2024-11-20T04:32:47,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741862_1038 (size=12516)
2024-11-20T04:32:47,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741862_1038 (size=12516)
2024-11-20T04:32:47,533 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/f91cfad24f04411e9e691fa6e97c9d2d
2024-11-20T04:32:47,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/f91cfad24f04411e9e691fa6e97c9d2d as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/f91cfad24f04411e9e691fa6e97c9d2d
2024-11-20T04:32:47,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/f91cfad24f04411e9e691fa6e97c9d2d, entries=7, sequenceid=214, filesize=12.2 K
2024-11-20T04:32:47,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 0817b3a9a087281bb368b13ba62230f1 in 22ms, sequenceid=214, compaction requested=true
2024-11-20T04:32:47,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0817b3a9a087281bb368b13ba62230f1:
2024-11-20T04:32:47,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0817b3a9a087281bb368b13ba62230f1:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T04:32:47,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T04:32:47,545 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T04:32:47,546 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128748 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T04:32:47,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on 0817b3a9a087281bb368b13ba62230f1
2024-11-20T04:32:47,546 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1541): 0817b3a9a087281bb368b13ba62230f1/info is initiating minor compaction (all files)
2024-11-20T04:32:47,547 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0817b3a9a087281bb368b13ba62230f1/info in TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.
2024-11-20T04:32:47,547 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0817b3a9a087281bb368b13ba62230f1 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-11-20T04:32:47,547 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/b3b2ffc7ce7848339be654a73bdea43c, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/7bcef4cbec1641d497d565b4242d9d8c, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/f91cfad24f04411e9e691fa6e97c9d2d] into tmpdir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp, totalSize=125.7 K
2024-11-20T04:32:47,547 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting b3b2ffc7ce7848339be654a73bdea43c, keycount=84, bloomtype=ROW, size=93.9 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1732077118991
2024-11-20T04:32:47,548 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7bcef4cbec1641d497d565b4242d9d8c, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732077155412
2024-11-20T04:32:47,548 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting f91cfad24f04411e9e691fa6e97c9d2d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732077165513
2024-11-20T04:32:47,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/738218dbebcf475ea4dacf15519d8326 is 1080, key is row0167/info:/1732077167524/Put/seqid=0
2024-11-20T04:32:47,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741863_1039 (size=21156)
2024-11-20T04:32:47,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741863_1039 (size=21156)
2024-11-20T04:32:47,557 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/738218dbebcf475ea4dacf15519d8326
2024-11-20T04:32:47,560 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0817b3a9a087281bb368b13ba62230f1#info#compaction#78 average throughput is 53.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T04:32:47,561 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/721a5664b77e419da65aa5ac04a64bc4 is 1080, key is row0062/info:/1732077118991/Put/seqid=0
2024-11-20T04:32:47,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/738218dbebcf475ea4dacf15519d8326 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/738218dbebcf475ea4dacf15519d8326
2024-11-20T04:32:47,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741864_1040 (size=118898)
2024-11-20T04:32:47,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741864_1040 (size=118898)
2024-11-20T04:32:47,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/738218dbebcf475ea4dacf15519d8326, entries=15, sequenceid=232, filesize=20.7 K
2024-11-20T04:32:47,569 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 0817b3a9a087281bb368b13ba62230f1 in 23ms, sequenceid=232, compaction requested=false
2024-11-20T04:32:47,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0817b3a9a087281bb368b13ba62230f1:
2024-11-20T04:32:47,570 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/721a5664b77e419da65aa5ac04a64bc4 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/721a5664b77e419da65aa5ac04a64bc4
2024-11-20T04:32:47,575 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0817b3a9a087281bb368b13ba62230f1/info of 0817b3a9a087281bb368b13ba62230f1 into 721a5664b77e419da65aa5ac04a64bc4(size=116.1 K), total size for store is 136.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T04:32:47,575 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0817b3a9a087281bb368b13ba62230f1:
2024-11-20T04:32:47,575 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1., storeName=0817b3a9a087281bb368b13ba62230f1/info, priority=13, startTime=1732077167545; duration=0sec
2024-11-20T04:32:47,575 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T04:32:47,575 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0817b3a9a087281bb368b13ba62230f1:info
2024-11-20T04:32:48,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
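The flush sequence logged above follows a write-to-temp-then-publish pattern: the flusher writes the new HFile under .tmp, and the "Committing ... as ..." step moves it into the store's info directory before it is added to the store. Below is a minimal sketch of that commit step using the public FileSystem API; the helper name CommitStoreFile is hypothetical, and the real HRegionFileSystem performs extra validation around the move.

// Sketch only: publish a temp store file by renaming it into the store directory.
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class CommitStoreFile {
  static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    // The rename is the "Committing <tmp> as <store>" step in the log above.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }
}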
2024-11-20T04:32:48,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta
2024-11-20T04:32:49,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta
2024-11-20T04:32:49,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
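Read against their timestamps, the WARN entries show the Close-WAL-Writer thread polling roughly once per second, per WAL file, until lease recovery can confirm the file is closed. The loop below is a generic sketch of that cadence, assuming an HDFS client on the classpath; it is illustrative only, since RecoverLeaseFSUtils applies its own timeouts and first calls recoverLease before polling isFileClosed.

// Sketch only: poll isFileClosed once per second until a deadline, logging-style retries elided.
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class WaitForClose {
  static boolean waitUntilClosed(FileSystem fs, Path wal, long timeoutMs) throws InterruptedException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // local filesystems have no lease to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if (dfs.isFileClosed(wal)) {
          return true;
        }
      } catch (IOException e) {
        // e.g. "Filesystem closed": warn and keep retrying, the per-second pattern seen above
      }
      Thread.sleep(1000L);
    }
    return false;
  }
}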
2024-11-20T04:32:49,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on 0817b3a9a087281bb368b13ba62230f1
2024-11-20T04:32:49,569 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0817b3a9a087281bb368b13ba62230f1 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-20T04:32:49,573 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/4ba6e57585e84f6dbf100b2f949d6045 is 1080, key is row0182/info:/1732077167547/Put/seqid=0
2024-11-20T04:32:49,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741865_1041 (size=17906)
2024-11-20T04:32:49,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741865_1041 (size=17906)
2024-11-20T04:32:49,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/4ba6e57585e84f6dbf100b2f949d6045
2024-11-20T04:32:49,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/4ba6e57585e84f6dbf100b2f949d6045 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/4ba6e57585e84f6dbf100b2f949d6045
2024-11-20T04:32:49,589 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/4ba6e57585e84f6dbf100b2f949d6045, entries=12, sequenceid=248, filesize=17.5 K
2024-11-20T04:32:49,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=12.61 KB/12912 for 0817b3a9a087281bb368b13ba62230f1 in 21ms, sequenceid=248, compaction requested=true
2024-11-20T04:32:49,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0817b3a9a087281bb368b13ba62230f1:
2024-11-20T04:32:49,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0817b3a9a087281bb368b13ba62230f1:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T04:32:49,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T04:32:49,590 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T04:32:49,591 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 157960 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T04:32:49,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on 0817b3a9a087281bb368b13ba62230f1
2024-11-20T04:32:49,592 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1541): 0817b3a9a087281bb368b13ba62230f1/info is initiating minor compaction (all files)
2024-11-20T04:32:49,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0817b3a9a087281bb368b13ba62230f1 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-20T04:32:49,592 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0817b3a9a087281bb368b13ba62230f1/info in TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.
2024-11-20T04:32:49,592 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/721a5664b77e419da65aa5ac04a64bc4, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/738218dbebcf475ea4dacf15519d8326, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/4ba6e57585e84f6dbf100b2f949d6045] into tmpdir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp, totalSize=154.3 K
2024-11-20T04:32:49,592 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 721a5664b77e419da65aa5ac04a64bc4, keycount=105, bloomtype=ROW, size=116.1 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732077118991
2024-11-20T04:32:49,593 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 738218dbebcf475ea4dacf15519d8326, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732077167524
2024-11-20T04:32:49,593 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4ba6e57585e84f6dbf100b2f949d6045, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732077167547
2024-11-20T04:32:49,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/2dbf3007c2874efa9dc6362cbbae0daa is 1080, key is row0194/info:/1732077169570/Put/seqid=0
2024-11-20T04:32:49,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741866_1042 (size=20086)
2024-11-20T04:32:49,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741866_1042 (size=20086)
2024-11-20T04:32:49,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/2dbf3007c2874efa9dc6362cbbae0daa
2024-11-20T04:32:49,605 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0817b3a9a087281bb368b13ba62230f1#info#compaction#81 average throughput is 67.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T04:32:49,605 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/44039c8d371c4e6eb596595d11bf5c2f is 1080, key is row0062/info:/1732077118991/Put/seqid=0
2024-11-20T04:32:49,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/2dbf3007c2874efa9dc6362cbbae0daa as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/2dbf3007c2874efa9dc6362cbbae0daa
2024-11-20T04:32:49,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741867_1043 (size=148311)
2024-11-20T04:32:49,615 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/2dbf3007c2874efa9dc6362cbbae0daa, entries=14, sequenceid=265, filesize=19.6 K
2024-11-20T04:32:49,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741867_1043 (size=148311)
2024-11-20T04:32:49,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 0817b3a9a087281bb368b13ba62230f1 in 24ms, sequenceid=265, compaction requested=false
2024-11-20T04:32:49,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0817b3a9a087281bb368b13ba62230f1:
2024-11-20T04:32:49,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on 0817b3a9a087281bb368b13ba62230f1
2024-11-20T04:32:49,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0817b3a9a087281bb368b13ba62230f1 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-20T04:32:49,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/fadc23578e184681a456fd24b86b7a78 is 1080, key is row0208/info:/1732077169593/Put/seqid=0
2024-11-20T04:32:49,623 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/44039c8d371c4e6eb596595d11bf5c2f as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/44039c8d371c4e6eb596595d11bf5c2f
2024-11-20T04:32:49,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741868_1044 (size=20092)
2024-11-20T04:32:49,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741868_1044 (size=20092)
2024-11-20T04:32:49,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/fadc23578e184681a456fd24b86b7a78
2024-11-20T04:32:49,629 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0817b3a9a087281bb368b13ba62230f1/info of 0817b3a9a087281bb368b13ba62230f1 into 44039c8d371c4e6eb596595d11bf5c2f(size=144.8 K), total size for store is 164.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T04:32:49,629 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0817b3a9a087281bb368b13ba62230f1:
2024-11-20T04:32:49,629 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1., storeName=0817b3a9a087281bb368b13ba62230f1/info, priority=13, startTime=1732077169590; duration=0sec
2024-11-20T04:32:49,629 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T04:32:49,629 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0817b3a9a087281bb368b13ba62230f1:info
2024-11-20T04:32:49,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/fadc23578e184681a456fd24b86b7a78 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/fadc23578e184681a456fd24b86b7a78
2024-11-20T04:32:49,637 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/fadc23578e184681a456fd24b86b7a78, entries=14, sequenceid=282, filesize=19.6 K
2024-11-20T04:32:49,638 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=3.15 KB/3228 for 0817b3a9a087281bb368b13ba62230f1 in 21ms, sequenceid=282, compaction requested=true
2024-11-20T04:32:49,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0817b3a9a087281bb368b13ba62230f1:
2024-11-20T04:32:49,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0817b3a9a087281bb368b13ba62230f1:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T04:32:49,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T04:32:49,638 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T04:32:49,639 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 188489 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T04:32:49,639 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1541): 0817b3a9a087281bb368b13ba62230f1/info is initiating minor compaction (all files)
2024-11-20T04:32:49,639 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0817b3a9a087281bb368b13ba62230f1/info in TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.
2024-11-20T04:32:49,639 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/44039c8d371c4e6eb596595d11bf5c2f, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/2dbf3007c2874efa9dc6362cbbae0daa, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/fadc23578e184681a456fd24b86b7a78] into tmpdir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp, totalSize=184.1 K
2024-11-20T04:32:49,639 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 44039c8d371c4e6eb596595d11bf5c2f, keycount=132, bloomtype=ROW, size=144.8 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732077118991
2024-11-20T04:32:49,640 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2dbf3007c2874efa9dc6362cbbae0daa, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1732077169570
2024-11-20T04:32:49,640 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting fadc23578e184681a456fd24b86b7a78, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732077169593
2024-11-20T04:32:49,651 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0817b3a9a087281bb368b13ba62230f1#info#compaction#83 average throughput is 54.73 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T04:32:49,651 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/51f8ad7aecbd413cb70fb49160e292fa is 1080, key is row0062/info:/1732077118991/Put/seqid=0 2024-11-20T04:32:49,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741869_1045 (size=178627) 2024-11-20T04:32:49,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741869_1045 (size=178627) 2024-11-20T04:32:49,660 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/51f8ad7aecbd413cb70fb49160e292fa as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/51f8ad7aecbd413cb70fb49160e292fa 2024-11-20T04:32:49,665 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0817b3a9a087281bb368b13ba62230f1/info of 0817b3a9a087281bb368b13ba62230f1 into 51f8ad7aecbd413cb70fb49160e292fa(size=174.4 K), total size for store is 174.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T04:32:49,665 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0817b3a9a087281bb368b13ba62230f1: 2024-11-20T04:32:49,665 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1., storeName=0817b3a9a087281bb368b13ba62230f1/info, priority=13, startTime=1732077169638; duration=0sec 2024-11-20T04:32:49,665 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:32:49,665 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0817b3a9a087281bb368b13ba62230f1:info 2024-11-20T04:32:50,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:50,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:51,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:51,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:51,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:51,630 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0817b3a9a087281bb368b13ba62230f1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T04:32:51,631 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=68, reuseRatio=88.31% 2024-11-20T04:32:51,631 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-20T04:32:51,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/c069bb8e82084436876533265e87c2c1 is 1080, key is row0222/info:/1732077169618/Put/seqid=0 2024-11-20T04:32:51,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741870_1046 (size=12523) 2024-11-20T04:32:51,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741870_1046 (size=12523) 2024-11-20T04:32:51,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=294 (bloomFilter=true), 
to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/c069bb8e82084436876533265e87c2c1 2024-11-20T04:32:51,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/c069bb8e82084436876533265e87c2c1 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/c069bb8e82084436876533265e87c2c1 2024-11-20T04:32:51,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/c069bb8e82084436876533265e87c2c1, entries=7, sequenceid=294, filesize=12.2 K 2024-11-20T04:32:51,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 0817b3a9a087281bb368b13ba62230f1 in 22ms, sequenceid=294, compaction requested=false 2024-11-20T04:32:51,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0817b3a9a087281bb368b13ba62230f1: 2024-11-20T04:32:51,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35439 {}] regionserver.HRegion(8855): Flush requested on 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:51,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0817b3a9a087281bb368b13ba62230f1 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-20T04:32:51,657 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/599918cd502444b1a8085fc5cbc95c61 is 1080, key is row0229/info:/1732077171630/Put/seqid=0 2024-11-20T04:32:51,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741871_1047 (size=22254) 2024-11-20T04:32:51,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741871_1047 (size=22254) 2024-11-20T04:32:51,662 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/599918cd502444b1a8085fc5cbc95c61 2024-11-20T04:32:51,668 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/599918cd502444b1a8085fc5cbc95c61 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/599918cd502444b1a8085fc5cbc95c61 2024-11-20T04:32:51,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/599918cd502444b1a8085fc5cbc95c61, entries=16, sequenceid=313, filesize=21.7 K 2024-11-20T04:32:51,674 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=12.61 KB/12912 for 0817b3a9a087281bb368b13ba62230f1 in 21ms, sequenceid=313, compaction requested=true 2024-11-20T04:32:51,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0817b3a9a087281bb368b13ba62230f1: 2024-11-20T04:32:51,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0817b3a9a087281bb368b13ba62230f1:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T04:32:51,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:32:51,674 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T04:32:51,675 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 213404 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T04:32:51,675 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1541): 0817b3a9a087281bb368b13ba62230f1/info is initiating minor compaction (all files) 2024-11-20T04:32:51,675 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0817b3a9a087281bb368b13ba62230f1/info in TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1. 
2024-11-20T04:32:51,675 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/51f8ad7aecbd413cb70fb49160e292fa, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/c069bb8e82084436876533265e87c2c1, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/599918cd502444b1a8085fc5cbc95c61] into tmpdir=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp, totalSize=208.4 K 2024-11-20T04:32:51,676 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 51f8ad7aecbd413cb70fb49160e292fa, keycount=160, bloomtype=ROW, size=174.4 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732077118991 2024-11-20T04:32:51,676 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting c069bb8e82084436876533265e87c2c1, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732077169618 2024-11-20T04:32:51,676 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] compactions.Compactor(225): Compacting 599918cd502444b1a8085fc5cbc95c61, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1732077171630 2024-11-20T04:32:51,687 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0817b3a9a087281bb368b13ba62230f1#info#compaction#86 average throughput is 62.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T04:32:51,687 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/0246ea77c5b744dd9e19666285c9da87 is 1080, key is row0062/info:/1732077118991/Put/seqid=0 2024-11-20T04:32:51,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741872_1048 (size=203554) 2024-11-20T04:32:51,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741872_1048 (size=203554) 2024-11-20T04:32:51,698 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/0246ea77c5b744dd9e19666285c9da87 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/0246ea77c5b744dd9e19666285c9da87 2024-11-20T04:32:51,702 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0817b3a9a087281bb368b13ba62230f1/info of 0817b3a9a087281bb368b13ba62230f1 into 0246ea77c5b744dd9e19666285c9da87(size=198.8 K), total size for store is 198.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T04:32:51,703 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0817b3a9a087281bb368b13ba62230f1: 2024-11-20T04:32:51,703 INFO [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1., storeName=0817b3a9a087281bb368b13ba62230f1/info, priority=13, startTime=1732077171674; duration=0sec 2024-11-20T04:32:51,703 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T04:32:51,703 DEBUG [RS:0;c2a32e16c274:35439-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0817b3a9a087281bb368b13ba62230f1:info 2024-11-20T04:32:52,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:52,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:53,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:53,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:53,673 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-20T04:32:53,673 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C35439%2C1732077105993.1732077173673 2024-11-20T04:32:53,690 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,690 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,690 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,690 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,690 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,690 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/WALs/c2a32e16c274,35439,1732077105993/c2a32e16c274%2C35439%2C1732077105993.1732077106373 with entries=308, filesize=307.10 KB; new WAL /user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/WALs/c2a32e16c274,35439,1732077105993/c2a32e16c274%2C35439%2C1732077105993.1732077173673 2024-11-20T04:32:53,691 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37567:37567),(127.0.0.1/127.0.0.1:37249:37249)] 2024-11-20T04:32:53,691 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/WALs/c2a32e16c274,35439,1732077105993/c2a32e16c274%2C35439%2C1732077105993.1732077106373 is not closed yet, will try archiving it next time 2024-11-20T04:32:53,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741833_1009 (size=314476) 2024-11-20T04:32:53,692 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741833_1009 (size=314476) 2024-11-20T04:32:53,695 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 066e06c5293d73afd0b554a99369f641: 2024-11-20T04:32:53,695 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-20T04:32:53,700 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/.tmp/info/4e385b2e838a442e9a68c8fbb209a5eb is 186, key is TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641./info:regioninfo/1732077121960/Put/seqid=0 2024-11-20T04:32:53,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741874_1050 (size=6153) 2024-11-20T04:32:53,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741874_1050 (size=6153) 2024-11-20T04:32:53,706 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/.tmp/info/4e385b2e838a442e9a68c8fbb209a5eb 2024-11-20T04:32:53,711 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/.tmp/info/4e385b2e838a442e9a68c8fbb209a5eb as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/info/4e385b2e838a442e9a68c8fbb209a5eb 2024-11-20T04:32:53,716 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/info/4e385b2e838a442e9a68c8fbb209a5eb, entries=5, sequenceid=21, filesize=6.0 K 2024-11-20T04:32:53,717 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 22ms, sequenceid=21, compaction requested=false 2024-11-20T04:32:53,717 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-20T04:32:53,717 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 0817b3a9a087281bb368b13ba62230f1 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-20T04:32:53,720 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/fb2ec8b3e8c144119bab40d483eca1b0 is 1080, key is row0245/info:/1732077171654/Put/seqid=0 2024-11-20T04:32:53,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741875_1051 (size=17918) 2024-11-20T04:32:53,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741875_1051 (size=17918) 2024-11-20T04:32:53,726 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at 
sequenceid=329 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/fb2ec8b3e8c144119bab40d483eca1b0 2024-11-20T04:32:53,731 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/.tmp/info/fb2ec8b3e8c144119bab40d483eca1b0 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/fb2ec8b3e8c144119bab40d483eca1b0 2024-11-20T04:32:53,735 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/fb2ec8b3e8c144119bab40d483eca1b0, entries=12, sequenceid=329, filesize=17.5 K 2024-11-20T04:32:53,736 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=0 B/0 for 0817b3a9a087281bb368b13ba62230f1 in 19ms, sequenceid=329, compaction requested=false 2024-11-20T04:32:53,736 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 0817b3a9a087281bb368b13ba62230f1: 2024-11-20T04:32:53,736 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C35439%2C1732077105993.1732077173736 2024-11-20T04:32:53,741 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,741 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,741 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,741 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,741 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,741 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/WALs/c2a32e16c274,35439,1732077105993/c2a32e16c274%2C35439%2C1732077105993.1732077173673 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/WALs/c2a32e16c274,35439,1732077105993/c2a32e16c274%2C35439%2C1732077105993.1732077173736 2024-11-20T04:32:53,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741873_1049 (size=731) 2024-11-20T04:32:53,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741873_1049 (size=731) 2024-11-20T04:32:53,745 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37567:37567),(127.0.0.1/127.0.0.1:37249:37249)] 2024-11-20T04:32:53,745 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/WALs/c2a32e16c274,35439,1732077105993/c2a32e16c274%2C35439%2C1732077105993.1732077106373 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/oldWALs/c2a32e16c274%2C35439%2C1732077105993.1732077106373 2024-11-20T04:32:53,745 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 
2024-11-20T04:32:53,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T04:32:53,746 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T04:32:53,746 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:32:53,746 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:32:53,746 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/WALs/c2a32e16c274,35439,1732077105993/c2a32e16c274%2C35439%2C1732077105993.1732077173673 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/oldWALs/c2a32e16c274%2C35439%2C1732077105993.1732077173673 2024-11-20T04:32:53,746 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:32:53,746 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T04:32:53,746 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T04:32:53,746 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=455427502, stopped=false 2024-11-20T04:32:53,746 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c2a32e16c274,39875,1732077105950 2024-11-20T04:32:53,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:32:53,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:32:53,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:53,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:53,748 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T04:32:53,748 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T04:32:53,748 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:32:53,748 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:32:53,749 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'c2a32e16c274,35439,1732077105993' ***** 2024-11-20T04:32:53,749 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T04:32:53,749 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T04:32:53,749 INFO [RS:0;c2a32e16c274:35439 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T04:32:53,749 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:32:53,749 INFO [RS:0;c2a32e16c274:35439 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T04:32:53,749 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(3091): Received CLOSE for 066e06c5293d73afd0b554a99369f641 2024-11-20T04:32:53,749 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(3091): Received CLOSE for 0817b3a9a087281bb368b13ba62230f1 2024-11-20T04:32:53,749 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(959): stopping server c2a32e16c274,35439,1732077105993 2024-11-20T04:32:53,749 INFO [RS:0;c2a32e16c274:35439 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:32:53,750 INFO [RS:0;c2a32e16c274:35439 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c2a32e16c274:35439. 2024-11-20T04:32:53,750 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 066e06c5293d73afd0b554a99369f641, disabling compactions & flushes 2024-11-20T04:32:53,750 DEBUG [RS:0;c2a32e16c274:35439 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:32:53,750 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641. 
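The call stack above (AbstractTestLogRolling.tearDown → HBaseTestingUtil.shutdownMiniCluster → SingleProcessHBaseCluster.shutdown → JVMClusterUtil.shutdown) is the entry point for everything that follows in this log. As a rough, hypothetical sketch only — the class name and test body below are invented, and only HBaseTestingUtil with its startMiniCluster/shutdownMiniCluster calls comes from the trace — a JUnit 4 teardown that produces this kind of shutdown sequence looks roughly like:

```java
// Hypothetical sketch, not the actual AbstractTestLogRolling source: a JUnit 4
// test whose tearDown drives the mini-cluster shutdown traced in the log above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class LogRollingShutdownSketch {
  // HBaseTestingUtil is the HBase 3.x test helper named in the stack trace.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster(); // starts the master, region server and mini HDFS/ZK
  }

  @Test
  public void testLogRolls() throws Exception {
    // test body omitted; the log excerpt above is produced by the teardown path
  }

  @After
  public void tearDown() throws Exception {
    // Walks SingleProcessHBaseCluster.shutdown() -> JVMClusterUtil.shutdown(),
    // which emits the "Shutting down HBase Cluster" and region-close messages.
    testUtil.shutdownMiniCluster();
  }
}
```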
2024-11-20T04:32:53,750 DEBUG [RS:0;c2a32e16c274:35439 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:32:53,750 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641. 2024-11-20T04:32:53,750 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T04:32:53,750 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641. after waiting 0 ms 2024-11-20T04:32:53,750 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641. 2024-11-20T04:32:53,750 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T04:32:53,750 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T04:32:53,750 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T04:32:53,750 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T04:32:53,750 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:32:53,752 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-20T04:32:53,752 DEBUG [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(1325): Online Regions={066e06c5293d73afd0b554a99369f641=TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641., 1588230740=hbase:meta,,1.1588230740, 0817b3a9a087281bb368b13ba62230f1=TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.} 2024-11-20T04:32:53,752 DEBUG [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(1351): Waiting on 066e06c5293d73afd0b554a99369f641, 0817b3a9a087281bb368b13ba62230f1, 1588230740 2024-11-20T04:32:53,752 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T04:32:53,753 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T04:32:53,753 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T04:32:53,753 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T04:32:53,753 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T04:32:53,752 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/066e06c5293d73afd0b554a99369f641/info/6f6a8e9308f54fff8a77403e7ed15245.f5df11b7e0731dc76c8b1dee4b367d32->hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/6f6a8e9308f54fff8a77403e7ed15245-bottom] to archive 2024-11-20T04:32:53,753 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T04:32:53,755 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/066e06c5293d73afd0b554a99369f641/info/6f6a8e9308f54fff8a77403e7ed15245.f5df11b7e0731dc76c8b1dee4b367d32 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/066e06c5293d73afd0b554a99369f641/info/6f6a8e9308f54fff8a77403e7ed15245.f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:32:53,755 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c2a32e16c274:39875 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-20T04:32:53,755 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-20T04:32:53,763 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/066e06c5293d73afd0b554a99369f641/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=121 2024-11-20T04:32:53,763 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641. 2024-11-20T04:32:53,763 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 066e06c5293d73afd0b554a99369f641: Waiting for close lock at 1732077173749Running coprocessor pre-close hooks at 1732077173749Disabling compacts and flushes for region at 1732077173749Disabling writes for close at 1732077173750 (+1 ms)Writing region close event to WAL at 1732077173756 (+6 ms)Running coprocessor post-close hooks at 1732077173763 (+7 ms)Closed at 1732077173763 2024-11-20T04:32:53,763 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732077121067.066e06c5293d73afd0b554a99369f641. 2024-11-20T04:32:53,763 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0817b3a9a087281bb368b13ba62230f1, disabling compactions & flushes 2024-11-20T04:32:53,763 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1. 2024-11-20T04:32:53,763 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1. 2024-11-20T04:32:53,763 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1. after waiting 0 ms 2024-11-20T04:32:53,764 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1. 
2024-11-20T04:32:53,764 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/6f6a8e9308f54fff8a77403e7ed15245.f5df11b7e0731dc76c8b1dee4b367d32->hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/f5df11b7e0731dc76c8b1dee4b367d32/info/6f6a8e9308f54fff8a77403e7ed15245-top, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-8e607ccf19454c75bce5c00547b983f4, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/c34df07d262d4af7b5d62afa67a3f1d8, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-fbb92fd1341145f3a76514ce3a37041d, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/01bb1cdd043c43a0964560d2f42eb9c2, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/0c53e18bd94a40158e23aec230363c06, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/9ee5d887ebd54b12a74246e801037f4f, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/0438a34d3ab140718edd395bc321a218, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/b3b2ffc7ce7848339be654a73bdea43c, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/7327aa1b7b6a49389c31e66882cf7196, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/7bcef4cbec1641d497d565b4242d9d8c, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/721a5664b77e419da65aa5ac04a64bc4, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/f91cfad24f04411e9e691fa6e97c9d2d, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/738218dbebcf475ea4dacf15519d8326, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/44039c8d371c4e6eb596595d11bf5c2f, 
hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/4ba6e57585e84f6dbf100b2f949d6045, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/2dbf3007c2874efa9dc6362cbbae0daa, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/51f8ad7aecbd413cb70fb49160e292fa, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/fadc23578e184681a456fd24b86b7a78, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/c069bb8e82084436876533265e87c2c1, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/599918cd502444b1a8085fc5cbc95c61] to archive 2024-11-20T04:32:53,765 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T04:32:53,765 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-20T04:32:53,766 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T04:32:53,766 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T04:32:53,766 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732077173752Running coprocessor pre-close hooks at 1732077173752Disabling compacts and flushes for region at 1732077173752Disabling writes for close at 1732077173753 (+1 ms)Writing region close event to WAL at 1732077173762 (+9 ms)Running coprocessor post-close hooks at 1732077173766 (+4 ms)Closed at 1732077173766 2024-11-20T04:32:53,766 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T04:32:53,766 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/6f6a8e9308f54fff8a77403e7ed15245.f5df11b7e0731dc76c8b1dee4b367d32 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/6f6a8e9308f54fff8a77403e7ed15245.f5df11b7e0731dc76c8b1dee4b367d32 2024-11-20T04:32:53,768 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-8e607ccf19454c75bce5c00547b983f4 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-8e607ccf19454c75bce5c00547b983f4 2024-11-20T04:32:53,769 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/c34df07d262d4af7b5d62afa67a3f1d8 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/c34df07d262d4af7b5d62afa67a3f1d8 2024-11-20T04:32:53,770 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-fbb92fd1341145f3a76514ce3a37041d to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/TestLogRolling-testLogRolling=f5df11b7e0731dc76c8b1dee4b367d32-fbb92fd1341145f3a76514ce3a37041d 2024-11-20T04:32:53,771 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/01bb1cdd043c43a0964560d2f42eb9c2 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/01bb1cdd043c43a0964560d2f42eb9c2 2024-11-20T04:32:53,772 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/0c53e18bd94a40158e23aec230363c06 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/0c53e18bd94a40158e23aec230363c06 2024-11-20T04:32:53,773 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/9ee5d887ebd54b12a74246e801037f4f to 
hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/9ee5d887ebd54b12a74246e801037f4f 2024-11-20T04:32:53,774 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/0438a34d3ab140718edd395bc321a218 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/0438a34d3ab140718edd395bc321a218 2024-11-20T04:32:53,775 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/b3b2ffc7ce7848339be654a73bdea43c to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/b3b2ffc7ce7848339be654a73bdea43c 2024-11-20T04:32:53,776 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/7327aa1b7b6a49389c31e66882cf7196 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/7327aa1b7b6a49389c31e66882cf7196 2024-11-20T04:32:53,777 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/7bcef4cbec1641d497d565b4242d9d8c to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/7bcef4cbec1641d497d565b4242d9d8c 2024-11-20T04:32:53,778 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/721a5664b77e419da65aa5ac04a64bc4 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/721a5664b77e419da65aa5ac04a64bc4 2024-11-20T04:32:53,779 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/f91cfad24f04411e9e691fa6e97c9d2d to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/f91cfad24f04411e9e691fa6e97c9d2d 2024-11-20T04:32:53,780 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/738218dbebcf475ea4dacf15519d8326 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/738218dbebcf475ea4dacf15519d8326 2024-11-20T04:32:53,781 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/44039c8d371c4e6eb596595d11bf5c2f to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/44039c8d371c4e6eb596595d11bf5c2f 2024-11-20T04:32:53,782 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/4ba6e57585e84f6dbf100b2f949d6045 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/4ba6e57585e84f6dbf100b2f949d6045 2024-11-20T04:32:53,783 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/2dbf3007c2874efa9dc6362cbbae0daa to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/2dbf3007c2874efa9dc6362cbbae0daa 2024-11-20T04:32:53,784 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/51f8ad7aecbd413cb70fb49160e292fa to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/51f8ad7aecbd413cb70fb49160e292fa 2024-11-20T04:32:53,785 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/fadc23578e184681a456fd24b86b7a78 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/fadc23578e184681a456fd24b86b7a78 2024-11-20T04:32:53,786 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/c069bb8e82084436876533265e87c2c1 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/c069bb8e82084436876533265e87c2c1 2024-11-20T04:32:53,787 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/599918cd502444b1a8085fc5cbc95c61 to hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/archive/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/599918cd502444b1a8085fc5cbc95c61 2024-11-20T04:32:53,787 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c34df07d262d4af7b5d62afa67a3f1d8=40830, 01bb1cdd043c43a0964560d2f42eb9c2=12513, 0c53e18bd94a40158e23aec230363c06=64713, 9ee5d887ebd54b12a74246e801037f4f=21156, 0438a34d3ab140718edd395bc321a218=19000, b3b2ffc7ce7848339be654a73bdea43c=96154, 7327aa1b7b6a49389c31e66882cf7196=22238, 7bcef4cbec1641d497d565b4242d9d8c=20078, 721a5664b77e419da65aa5ac04a64bc4=118898, f91cfad24f04411e9e691fa6e97c9d2d=12516, 738218dbebcf475ea4dacf15519d8326=21156, 44039c8d371c4e6eb596595d11bf5c2f=148311, 4ba6e57585e84f6dbf100b2f949d6045=17906, 2dbf3007c2874efa9dc6362cbbae0daa=20086, 51f8ad7aecbd413cb70fb49160e292fa=178627, fadc23578e184681a456fd24b86b7a78=20092, c069bb8e82084436876533265e87c2c1=12523, 599918cd502444b1a8085fc5cbc95c61=22254] 2024-11-20T04:32:53,790 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/recovered.edits/332.seqid, newMaxSeqId=332, maxSeqId=121 2024-11-20T04:32:53,790 INFO [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1. 
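Every "Archived from FileableStoreFile" line above applies the same transformation: a store file under `<root>/data/...` is moved to the mirrored location under `<root>/archive/data/...`. The following is a hypothetical helper (not HBase's HFileArchiver API) that reproduces that mapping with the Hadoop Path class; the class and method names are invented, while the example paths are copied from the log.

```java
// Hypothetical helper, not part of HBase: mirrors the data/ -> archive/ move that
// HFileArchiver performs in the "Archived from FileableStoreFile" lines above.
import org.apache.hadoop.fs.Path;

public final class ArchivePathSketch {

  /** Maps <root>/data/<ns>/<table>/<region>/<family>/<hfile> to <root>/archive/data/... */
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String root = rootDir.toUri().getPath();
    String file = storeFile.toUri().getPath();
    if (!file.startsWith(root + "/")) {
      throw new IllegalArgumentException(storeFile + " is not under " + rootDir);
    }
    String relative = file.substring(root.length() + 1); // "data/default/<table>/<region>/info/<hfile>"
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path(
        "hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6");
    Path hfile = new Path(root,
        "data/default/TestLogRolling-testLogRolling/0817b3a9a087281bb368b13ba62230f1/info/c34df07d262d4af7b5d62afa67a3f1d8");
    // Prints .../archive/data/default/TestLogRolling-testLogRolling/.../c34df07d262d4af7b5d62afa67a3f1d8
    System.out.println(toArchivePath(root, hfile));
  }
}
```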
2024-11-20T04:32:53,790 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0817b3a9a087281bb368b13ba62230f1: Waiting for close lock at 1732077173763Running coprocessor pre-close hooks at 1732077173763Disabling compacts and flushes for region at 1732077173763Disabling writes for close at 1732077173763Writing region close event to WAL at 1732077173787 (+24 ms)Running coprocessor post-close hooks at 1732077173790 (+3 ms)Closed at 1732077173790 2024-11-20T04:32:53,791 DEBUG [RS_CLOSE_REGION-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732077121067.0817b3a9a087281bb368b13ba62230f1. 2024-11-20T04:32:53,953 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(976): stopping server c2a32e16c274,35439,1732077105993; all regions closed. 2024-11-20T04:32:53,953 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,953 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,953 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,953 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,953 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741834_1010 (size=8107) 2024-11-20T04:32:53,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741834_1010 (size=8107) 2024-11-20T04:32:53,958 DEBUG [RS:0;c2a32e16c274:35439 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/oldWALs 2024-11-20T04:32:53,958 INFO [RS:0;c2a32e16c274:35439 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C35439%2C1732077105993.meta:.meta(num 1732077106743) 2024-11-20T04:32:53,958 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,958 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,958 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,958 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,959 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:53,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741876_1052 (size=778) 2024-11-20T04:32:53,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741876_1052 (size=778) 2024-11-20T04:32:53,962 DEBUG [RS:0;c2a32e16c274:35439 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/oldWALs 2024-11-20T04:32:53,962 INFO [RS:0;c2a32e16c274:35439 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C35439%2C1732077105993:(num 1732077173736) 2024-11-20T04:32:53,962 DEBUG [RS:0;c2a32e16c274:35439 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:32:53,962 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:32:53,962 INFO [RS:0;c2a32e16c274:35439 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:32:53,962 INFO [RS:0;c2a32e16c274:35439 {}] hbase.ChoreService(370): Chore service for: 
regionserver/c2a32e16c274:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T04:32:53,962 INFO [RS:0;c2a32e16c274:35439 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:32:53,962 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T04:32:53,962 INFO [RS:0;c2a32e16c274:35439 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35439 2024-11-20T04:32:53,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c2a32e16c274,35439,1732077105993 2024-11-20T04:32:53,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:32:53,965 INFO [RS:0;c2a32e16c274:35439 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T04:32:53,967 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c2a32e16c274,35439,1732077105993] 2024-11-20T04:32:53,968 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c2a32e16c274,35439,1732077105993 already deleted, retry=false 2024-11-20T04:32:53,968 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c2a32e16c274,35439,1732077105993 expired; onlineServers=0 2024-11-20T04:32:53,968 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c2a32e16c274,39875,1732077105950' ***** 2024-11-20T04:32:53,968 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T04:32:53,968 INFO [M:0;c2a32e16c274:39875 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:32:53,969 INFO [M:0;c2a32e16c274:39875 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:32:53,969 DEBUG [M:0;c2a32e16c274:39875 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T04:32:53,969 DEBUG [M:0;c2a32e16c274:39875 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T04:32:53,969 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
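The expiration handling above starts from a plain ZooKeeper NodeDeleted event on the region server's ephemeral znode under /hbase/rs. As a generic illustration using the stock ZooKeeper client rather than HBase's ZKWatcher — the class name and latch are invented, while the quorum address and znode path are the ones in the log — a watcher for that event could look like:

```java
// Generic illustration with the stock ZooKeeper client (not HBase's ZKWatcher):
// block until the region server's ephemeral znode is deleted, which is the
// NodeDeleted event the master's RegionServerTracker reacts to above.
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:50236"; // quorum from the log
    String rsZNode = "/hbase/rs/c2a32e16c274,35439,1732077105993"; // znode from the log
    CountDownLatch deleted = new CountDownLatch(1);

    ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> {
      // The default watcher also sees connection-state events; filter for the delete.
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && rsZNode.equals(event.getPath())) {
        deleted.countDown();
      }
    });

    // exists(path, true) registers a one-shot watch on the znode with the default watcher.
    // (If the znode is already gone this returns null and the watch fires on re-creation.)
    zk.exists(rsZNode, true);
    deleted.await();
    System.out.println("Region server znode deleted; server treated as expired.");
    zk.close();
  }
}
```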
2024-11-20T04:32:53,969 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732077106148 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732077106148,5,FailOnTimeoutGroup] 2024-11-20T04:32:53,969 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732077106148 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732077106148,5,FailOnTimeoutGroup] 2024-11-20T04:32:53,969 INFO [M:0;c2a32e16c274:39875 {}] hbase.ChoreService(370): Chore service for: master/c2a32e16c274:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T04:32:53,969 INFO [M:0;c2a32e16c274:39875 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:32:53,969 DEBUG [M:0;c2a32e16c274:39875 {}] master.HMaster(1795): Stopping service threads 2024-11-20T04:32:53,969 INFO [M:0;c2a32e16c274:39875 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T04:32:53,969 INFO [M:0;c2a32e16c274:39875 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T04:32:53,969 INFO [M:0;c2a32e16c274:39875 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T04:32:53,969 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T04:32:53,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T04:32:53,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:53,970 DEBUG [M:0;c2a32e16c274:39875 {}] zookeeper.ZKUtil(347): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T04:32:53,970 WARN [M:0;c2a32e16c274:39875 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T04:32:53,971 INFO [M:0;c2a32e16c274:39875 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/.lastflushedseqids 2024-11-20T04:32:53,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741877_1053 (size=228) 2024-11-20T04:32:53,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741877_1053 (size=228) 2024-11-20T04:32:53,975 INFO [M:0;c2a32e16c274:39875 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T04:32:53,976 INFO [M:0;c2a32e16c274:39875 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T04:32:53,976 DEBUG [M:0;c2a32e16c274:39875 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T04:32:53,976 INFO [M:0;c2a32e16c274:39875 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:32:53,976 DEBUG [M:0;c2a32e16c274:39875 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:32:53,976 DEBUG [M:0;c2a32e16c274:39875 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T04:32:53,976 DEBUG [M:0;c2a32e16c274:39875 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:32:53,976 INFO [M:0;c2a32e16c274:39875 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.36 KB 2024-11-20T04:32:53,991 DEBUG [M:0;c2a32e16c274:39875 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2548c4eae65f45bea130e9a372fe900f is 82, key is hbase:meta,,1/info:regioninfo/1732077106776/Put/seqid=0 2024-11-20T04:32:53,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741878_1054 (size=5672) 2024-11-20T04:32:53,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741878_1054 (size=5672) 2024-11-20T04:32:53,999 INFO [M:0;c2a32e16c274:39875 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2548c4eae65f45bea130e9a372fe900f 2024-11-20T04:32:54,019 DEBUG [M:0;c2a32e16c274:39875 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/139e78f923144f7c96f6ae37efb49e49 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732077107184/Put/seqid=0 2024-11-20T04:32:54,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741879_1055 (size=7091) 2024-11-20T04:32:54,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741879_1055 (size=7091) 2024-11-20T04:32:54,024 INFO [M:0;c2a32e16c274:39875 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/139e78f923144f7c96f6ae37efb49e49 2024-11-20T04:32:54,028 INFO [M:0;c2a32e16c274:39875 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 139e78f923144f7c96f6ae37efb49e49 2024-11-20T04:32:54,043 DEBUG [M:0;c2a32e16c274:39875 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e4239e414a444ddb9d562ddb2b4aaa10 is 69, key is c2a32e16c274,35439,1732077105993/rs:state/1732077106228/Put/seqid=0 
2024-11-20T04:32:54,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741880_1056 (size=5156) 2024-11-20T04:32:54,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741880_1056 (size=5156) 2024-11-20T04:32:54,048 INFO [M:0;c2a32e16c274:39875 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e4239e414a444ddb9d562ddb2b4aaa10 2024-11-20T04:32:54,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:32:54,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35439-0x10133a51da00001, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:32:54,068 INFO [RS:0;c2a32e16c274:35439 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T04:32:54,068 INFO [RS:0;c2a32e16c274:35439 {}] regionserver.HRegionServer(1031): Exiting; stopping=c2a32e16c274,35439,1732077105993; zookeeper connection closed. 2024-11-20T04:32:54,068 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@22ca431b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@22ca431b 2024-11-20T04:32:54,068 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T04:32:54,071 DEBUG [M:0;c2a32e16c274:39875 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0b96ac142783462c8f9f880877f4f0ec is 52, key is load_balancer_on/state:d/1732077106815/Put/seqid=0 2024-11-20T04:32:54,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741881_1057 (size=5056) 2024-11-20T04:32:54,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741881_1057 (size=5056) 2024-11-20T04:32:54,076 INFO [M:0;c2a32e16c274:39875 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0b96ac142783462c8f9f880877f4f0ec 2024-11-20T04:32:54,081 DEBUG [M:0;c2a32e16c274:39875 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2548c4eae65f45bea130e9a372fe900f as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2548c4eae65f45bea130e9a372fe900f 2024-11-20T04:32:54,085 INFO [M:0;c2a32e16c274:39875 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2548c4eae65f45bea130e9a372fe900f, entries=8, sequenceid=125, filesize=5.5 K 2024-11-20T04:32:54,085 DEBUG [M:0;c2a32e16c274:39875 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/139e78f923144f7c96f6ae37efb49e49 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/139e78f923144f7c96f6ae37efb49e49 2024-11-20T04:32:54,089 INFO [M:0;c2a32e16c274:39875 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 139e78f923144f7c96f6ae37efb49e49 2024-11-20T04:32:54,090 INFO [M:0;c2a32e16c274:39875 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/139e78f923144f7c96f6ae37efb49e49, entries=13, sequenceid=125, filesize=6.9 K 2024-11-20T04:32:54,090 DEBUG [M:0;c2a32e16c274:39875 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e4239e414a444ddb9d562ddb2b4aaa10 as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e4239e414a444ddb9d562ddb2b4aaa10 2024-11-20T04:32:54,095 INFO [M:0;c2a32e16c274:39875 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e4239e414a444ddb9d562ddb2b4aaa10, entries=1, sequenceid=125, filesize=5.0 K 2024-11-20T04:32:54,095 DEBUG [M:0;c2a32e16c274:39875 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0b96ac142783462c8f9f880877f4f0ec as hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0b96ac142783462c8f9f880877f4f0ec 2024-11-20T04:32:54,099 INFO [M:0;c2a32e16c274:39875 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36491/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0b96ac142783462c8f9f880877f4f0ec, entries=1, sequenceid=125, filesize=4.9 K 2024-11-20T04:32:54,100 INFO [M:0;c2a32e16c274:39875 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=125, compaction requested=false 2024-11-20T04:32:54,101 INFO [M:0;c2a32e16c274:39875 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
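The "Committing .../.tmp/info/... as .../info/..." lines above correspond to moving each freshly flushed file out of the store's temporary directory into its column-family directory. Assuming the commit boils down to an HDFS rename (a simplification; HBase's HRegionFileSystem internals are not shown in this log), a bare-bones sketch looks like this — the helper name is invented and the paths are copied from the log:

```java
// Bare-bones sketch (not HBase's HRegionFileSystem): "commit" a flushed file by
// renaming it from the store's .tmp directory into the column-family directory,
// as the "Committing .../.tmp/info/... as .../info/..." lines above describe.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class CommitFlushSketch {

  static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
    Path dest = new Path(familyDir, tmpFile.getName());
    // On HDFS rename is atomic within the namespace, which is what makes this commit safe.
    if (!fs.rename(tmpFile, dest)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dest);
    }
    return dest;
  }

  public static void main(String[] args) throws IOException {
    // Assumes fs.defaultFS points at the test cluster (hdfs://localhost:36491 in the log).
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path store = new Path("/user/jenkins/test-data/dc7739f1-11c9-bf2f-8acb-28bf69f63fc6"
        + "/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682");
    Path tmpInfo = new Path(store, ".tmp/info/2548c4eae65f45bea130e9a372fe900f");
    Path infoDir = new Path(store, "info");
    System.out.println("committed to " + commitStoreFile(fs, tmpInfo, infoDir));
  }
}
```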
2024-11-20T04:32:54,101 DEBUG [M:0;c2a32e16c274:39875 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732077173976Disabling compacts and flushes for region at 1732077173976Disabling writes for close at 1732077173976Obtaining lock to block concurrent updates at 1732077173976Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732077173976Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1732077173976Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732077173977 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732077173977Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732077173991 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732077173991Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732077174004 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732077174018 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732077174018Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732077174029 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732077174043 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732077174043Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732077174052 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732077174070 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732077174070Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49cfcca8: reopening flushed file at 1732077174080 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fab7c64: reopening flushed file at 1732077174085 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@138e303: reopening flushed file at 1732077174090 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@86ca730: reopening flushed file at 1732077174095 (+5 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=125, compaction requested=false at 1732077174100 (+5 ms)Writing region close event to WAL at 1732077174101 (+1 ms)Closed at 1732077174101 2024-11-20T04:32:54,102 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:54,102 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:54,102 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:54,102 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:54,102 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:54,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41351 is added to blk_1073741830_1006 (size=61332) 2024-11-20T04:32:54,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43619 is added to blk_1073741830_1006 (size=61332) 2024-11-20T04:32:54,105 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T04:32:54,105 INFO [M:0;c2a32e16c274:39875 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T04:32:54,105 INFO [M:0;c2a32e16c274:39875 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39875 2024-11-20T04:32:54,106 INFO [M:0;c2a32e16c274:39875 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T04:32:54,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:32:54,207 INFO [M:0;c2a32e16c274:39875 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T04:32:54,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39875-0x10133a51da00000, quorum=127.0.0.1:50236, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:32:54,210 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ae6275a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:32:54,210 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a34980e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:32:54,210 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:32:54,211 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26a9d62d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:32:54,211 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25dcc129{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/hadoop.log.dir/,STOPPED} 2024-11-20T04:32:54,212 WARN [BP-1347059229-172.17.0.2-1732077105287 heartbeating to localhost/127.0.0.1:36491 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:32:54,212 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
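Editor's note: the ZKWatcher lines above distinguish two kinds of ZooKeeper events: connection-state changes (type=None, e.g. state=Closed while the master shuts down) and znode events that carry a path. A hedged sketch of how a Watcher separates them using the plain ZooKeeper client API (the class name is invented):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;

    // Hypothetical watcher: type=None events carry only a connection state
    // (SyncConnected, Closed, ...); everything else names a znode path.
    public class LoggingWatcher implements Watcher {
      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Watcher.Event.EventType.None) {
          System.out.println("Received ZooKeeper Event, type=None, state=" + event.getState());
        } else {
          System.out.println("Received ZooKeeper Event, type=" + event.getType()
              + ", path=" + event.getPath());
        }
      }
    }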
2024-11-20T04:32:54,212 WARN [BP-1347059229-172.17.0.2-1732077105287 heartbeating to localhost/127.0.0.1:36491 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1347059229-172.17.0.2-1732077105287 (Datanode Uuid 71d11684-2788-4c06-9f50-bd10d0468c2f) service to localhost/127.0.0.1:36491 2024-11-20T04:32:54,212 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:32:54,213 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/cluster_68750bbf-b3ca-58c0-c5df-ac7d54614ae7/data/data3/current/BP-1347059229-172.17.0.2-1732077105287 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:32:54,213 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/cluster_68750bbf-b3ca-58c0-c5df-ac7d54614ae7/data/data4/current/BP-1347059229-172.17.0.2-1732077105287 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:32:54,213 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:32:54,215 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f932cc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:32:54,215 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ff7780b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:32:54,215 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:32:54,215 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2735da07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:32:54,216 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16ccf5f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/hadoop.log.dir/,STOPPED} 2024-11-20T04:32:54,217 WARN [BP-1347059229-172.17.0.2-1732077105287 heartbeating to localhost/127.0.0.1:36491 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:32:54,217 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
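Editor's note: the refreshUsed WARNs above come from disk-usage refresh threads being interrupted mid-sleep while the datanodes shut down. A generic, hypothetical sketch of that interrupt-to-stop pattern (not Hadoop's CachingGetSpaceUsed code):

    // Hypothetical periodic-refresh loop that exits cleanly when interrupted during sleep,
    // which is what the "Thread Interrupted waiting to refresh disk information" WARNs record.
    public final class RefreshLoop implements Runnable {
      private final long intervalMillis;
      private final Runnable refresh;

      public RefreshLoop(long intervalMillis, Runnable refresh) {
        this.intervalMillis = intervalMillis;
        this.refresh = refresh;
      }

      @Override
      public void run() {
        try {
          while (!Thread.currentThread().isInterrupted()) {
            Thread.sleep(intervalMillis);       // shutdown interrupts the thread here
            refresh.run();
          }
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();   // preserve the flag and stop refreshing
        }
      }
    }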
2024-11-20T04:32:54,217 WARN [BP-1347059229-172.17.0.2-1732077105287 heartbeating to localhost/127.0.0.1:36491 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1347059229-172.17.0.2-1732077105287 (Datanode Uuid 079a8ecb-ae18-47ab-b528-e7a0ef038e0d) service to localhost/127.0.0.1:36491 2024-11-20T04:32:54,217 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:32:54,218 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/cluster_68750bbf-b3ca-58c0-c5df-ac7d54614ae7/data/data1/current/BP-1347059229-172.17.0.2-1732077105287 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:32:54,218 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/cluster_68750bbf-b3ca-58c0-c5df-ac7d54614ae7/data/data2/current/BP-1347059229-172.17.0.2-1732077105287 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:32:54,218 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:32:54,224 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1520cb76{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T04:32:54,225 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65b546b1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:32:54,225 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:32:54,225 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5327e2a9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:32:54,225 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33382c80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/hadoop.log.dir/,STOPPED} 2024-11-20T04:32:54,232 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T04:32:54,243 INFO [regionserver/c2a32e16c274:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:32:54,258 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T04:32:54,270 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=226 (was 206) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36491 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36491 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36491 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36491 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36491 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36491 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36491 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:36491 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36491 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=20 (was 57), ProcessCount=11 (was 11), AvailableMemoryMB=7066 (was 7124) 2024-11-20T04:32:54,278 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=226, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=20, ProcessCount=11, AvailableMemoryMB=7065 2024-11-20T04:32:54,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T04:32:54,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/hadoop.log.dir so I do NOT create it in target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d 2024-11-20T04:32:54,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/710fa355-84f4-8096-d240-df0283177c0b/hadoop.tmp.dir so I do NOT create it in target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d 2024-11-20T04:32:54,279 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/cluster_ab6990b0-75b2-6dad-fdc7-a832b9c481bd, deleteOnExit=true 2024-11-20T04:32:54,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T04:32:54,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/test.cache.data in system properties and HBase conf 2024-11-20T04:32:54,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T04:32:54,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/hadoop.log.dir in system properties and HBase conf 2024-11-20T04:32:54,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T04:32:54,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T04:32:54,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T04:32:54,280 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-20T04:32:54,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T04:32:54,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T04:32:54,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T04:32:54,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T04:32:54,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T04:32:54,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T04:32:54,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T04:32:54,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T04:32:54,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T04:32:54,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/nfs.dump.dir in system properties and HBase conf 2024-11-20T04:32:54,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/java.io.tmpdir in system properties and HBase conf 2024-11-20T04:32:54,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T04:32:54,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T04:32:54,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T04:32:54,295 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T04:32:54,353 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:32:54,357 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:32:54,358 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:32:54,358 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:32:54,358 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:32:54,358 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:32:54,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d7cc0b1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:32:54,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78047c32{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:32:54,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:54,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T04:32:54,473 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1926aa54{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/java.io.tmpdir/jetty-localhost-34263-hadoop-hdfs-3_4_1-tests_jar-_-any-9162716253631862611/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T04:32:54,474 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@18478920{HTTP/1.1, (http/1.1)}{localhost:34263} 2024-11-20T04:32:54,474 INFO [Time-limited test {}] server.Server(415): Started @308552ms 2024-11-20T04:32:54,486 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T04:32:54,537 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:32:54,540 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:32:54,541 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:32:54,541 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:32:54,541 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T04:32:54,541 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e818af2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:32:54,541 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2578bc63{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:32:54,657 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@413a6699{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/java.io.tmpdir/jetty-localhost-33895-hadoop-hdfs-3_4_1-tests_jar-_-any-6334935249998053805/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:32:54,657 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1cfa6b2d{HTTP/1.1, (http/1.1)}{localhost:33895} 2024-11-20T04:32:54,657 INFO [Time-limited test {}] server.Server(415): Started @308735ms 2024-11-20T04:32:54,658 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T04:32:54,687 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T04:32:54,690 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T04:32:54,690 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T04:32:54,690 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T04:32:54,690 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T04:32:54,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2fb7753b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/hadoop.log.dir/,AVAILABLE} 2024-11-20T04:32:54,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f638c2a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T04:32:54,752 WARN [Thread-2461 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/cluster_ab6990b0-75b2-6dad-fdc7-a832b9c481bd/data/data1/current/BP-802853067-172.17.0.2-1732077174301/current, will proceed with Du for space computation calculation, 2024-11-20T04:32:54,752 WARN [Thread-2462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/cluster_ab6990b0-75b2-6dad-fdc7-a832b9c481bd/data/data2/current/BP-802853067-172.17.0.2-1732077174301/current, will proceed with Du for space computation calculation, 2024-11-20T04:32:54,774 WARN [Thread-2440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T04:32:54,777 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5be120cf4ff19cca with lease ID 0x204f356dc98503b7: Processing first storage report for DS-bfa38076-66a0-4d5b-94cc-31057baa725b from datanode DatanodeRegistration(127.0.0.1:34551, datanodeUuid=bcd3f976-15e1-4adb-823e-fb7f0c4b2004, infoPort=43061, infoSecurePort=0, ipcPort=44877, storageInfo=lv=-57;cid=testClusterID;nsid=1270780232;c=1732077174301) 2024-11-20T04:32:54,777 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5be120cf4ff19cca with lease ID 0x204f356dc98503b7: from storage DS-bfa38076-66a0-4d5b-94cc-31057baa725b node DatanodeRegistration(127.0.0.1:34551, datanodeUuid=bcd3f976-15e1-4adb-823e-fb7f0c4b2004, infoPort=43061, infoSecurePort=0, ipcPort=44877, storageInfo=lv=-57;cid=testClusterID;nsid=1270780232;c=1732077174301), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T04:32:54,777 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5be120cf4ff19cca with lease ID 0x204f356dc98503b7: Processing first storage report for DS-469f840b-b1d8-4b20-b403-e8f04a12af20 from datanode DatanodeRegistration(127.0.0.1:34551, datanodeUuid=bcd3f976-15e1-4adb-823e-fb7f0c4b2004, infoPort=43061, infoSecurePort=0, ipcPort=44877, storageInfo=lv=-57;cid=testClusterID;nsid=1270780232;c=1732077174301) 2024-11-20T04:32:54,777 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5be120cf4ff19cca with lease ID 0x204f356dc98503b7: from storage DS-469f840b-b1d8-4b20-b403-e8f04a12af20 node DatanodeRegistration(127.0.0.1:34551, datanodeUuid=bcd3f976-15e1-4adb-823e-fb7f0c4b2004, infoPort=43061, infoSecurePort=0, ipcPort=44877, storageInfo=lv=-57;cid=testClusterID;nsid=1270780232;c=1732077174301), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:32:54,806 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@64440bf6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/java.io.tmpdir/jetty-localhost-38203-hadoop-hdfs-3_4_1-tests_jar-_-any-9690247533811055501/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:32:54,806 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2551ca75{HTTP/1.1, (http/1.1)}{localhost:38203} 2024-11-20T04:32:54,806 INFO [Time-limited test {}] server.Server(415): Started @308885ms 2024-11-20T04:32:54,807 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
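Editor's note: the Jetty lines above show the test HTTP endpoints binding to "{localhost:0}" and then reporting the port the OS actually assigned (e.g. "Started ServerConnector@...{localhost:38203}"). A hedged sketch of that ephemeral-port pattern with the Jetty 9 API (handler wiring simplified; this is not the Hadoop HttpServer2 code):

    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.server.ServerConnector;
    import org.eclipse.jetty.servlet.ServletContextHandler;

    public final class EphemeralJetty {
      public static void main(String[] args) throws Exception {
        Server server = new Server(0);                     // port 0: let the OS pick a free port
        ServletContextHandler ctx = new ServletContextHandler();
        ctx.setContextPath("/static");
        server.setHandler(ctx);
        server.start();
        int port = ((ServerConnector) server.getConnectors()[0]).getLocalPort();
        System.out.println("Started on localhost:" + port); // mirrors "Started ServerConnector...{localhost:NNNNN}"
        server.stop();
      }
    }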
2024-11-20T04:32:54,905 WARN [Thread-2487 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/cluster_ab6990b0-75b2-6dad-fdc7-a832b9c481bd/data/data3/current/BP-802853067-172.17.0.2-1732077174301/current, will proceed with Du for space computation calculation, 2024-11-20T04:32:54,905 WARN [Thread-2488 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/cluster_ab6990b0-75b2-6dad-fdc7-a832b9c481bd/data/data4/current/BP-802853067-172.17.0.2-1732077174301/current, will proceed with Du for space computation calculation, 2024-11-20T04:32:54,922 WARN [Thread-2476 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T04:32:54,924 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x57e74e0fe690c0a9 with lease ID 0x204f356dc98503b8: Processing first storage report for DS-8eaa1e4e-a268-4e3b-ad12-89159198cf69 from datanode DatanodeRegistration(127.0.0.1:42479, datanodeUuid=89e707ec-ddbc-4b56-8051-76684a93c5a9, infoPort=33137, infoSecurePort=0, ipcPort=42891, storageInfo=lv=-57;cid=testClusterID;nsid=1270780232;c=1732077174301) 2024-11-20T04:32:54,924 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x57e74e0fe690c0a9 with lease ID 0x204f356dc98503b8: from storage DS-8eaa1e4e-a268-4e3b-ad12-89159198cf69 node DatanodeRegistration(127.0.0.1:42479, datanodeUuid=89e707ec-ddbc-4b56-8051-76684a93c5a9, infoPort=33137, infoSecurePort=0, ipcPort=42891, storageInfo=lv=-57;cid=testClusterID;nsid=1270780232;c=1732077174301), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:32:54,924 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x57e74e0fe690c0a9 with lease ID 0x204f356dc98503b8: Processing first storage report for DS-c9371939-0b26-49f5-968b-b47e3c93f54a from datanode DatanodeRegistration(127.0.0.1:42479, datanodeUuid=89e707ec-ddbc-4b56-8051-76684a93c5a9, infoPort=33137, infoSecurePort=0, ipcPort=42891, storageInfo=lv=-57;cid=testClusterID;nsid=1270780232;c=1732077174301) 2024-11-20T04:32:54,924 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x57e74e0fe690c0a9 with lease ID 0x204f356dc98503b8: from storage DS-c9371939-0b26-49f5-968b-b47e3c93f54a node DatanodeRegistration(127.0.0.1:42479, datanodeUuid=89e707ec-ddbc-4b56-8051-76684a93c5a9, infoPort=33137, infoSecurePort=0, ipcPort=42891, storageInfo=lv=-57;cid=testClusterID;nsid=1270780232;c=1732077174301), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T04:32:54,929 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d 2024-11-20T04:32:54,932 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/cluster_ab6990b0-75b2-6dad-fdc7-a832b9c481bd/zookeeper_0, clientPort=58278, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/cluster_ab6990b0-75b2-6dad-fdc7-a832b9c481bd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/cluster_ab6990b0-75b2-6dad-fdc7-a832b9c481bd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T04:32:54,933 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58278 2024-11-20T04:32:54,933 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:32:54,934 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:32:54,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:32:54,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741825_1001 (size=7) 2024-11-20T04:32:54,944 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3 with version=8 2024-11-20T04:32:54,944 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35115/user/jenkins/test-data/9c6d917d-abf3-9bba-14b4-6fd180d0875c/hbase-staging 2024-11-20T04:32:54,946 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:32:54,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:32:54,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:32:54,946 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:32:54,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:32:54,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:32:54,946 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T04:32:54,947 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:32:54,947 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34053 2024-11-20T04:32:54,948 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34053 connecting to ZooKeeper ensemble=127.0.0.1:58278 2024-11-20T04:32:54,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:340530x0, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:32:54,956 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34053-0x10133a62b230000 connected 2024-11-20T04:32:54,975 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:32:54,977 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:32:54,979 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:32:54,979 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3, hbase.cluster.distributed=false 2024-11-20T04:32:54,981 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:32:54,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34053 2024-11-20T04:32:54,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34053 2024-11-20T04:32:54,985 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34053 2024-11-20T04:32:54,985 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34053 2024-11-20T04:32:54,985 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34053 2024-11-20T04:32:55,000 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c2a32e16c274:0 server-side Connection retries=45 2024-11-20T04:32:55,000 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:32:55,000 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T04:32:55,000 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T04:32:55,000 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T04:32:55,000 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T04:32:55,000 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T04:32:55,000 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T04:32:55,001 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41211 2024-11-20T04:32:55,002 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41211 connecting to ZooKeeper ensemble=127.0.0.1:58278 2024-11-20T04:32:55,002 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:32:55,004 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:32:55,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412110x0, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T04:32:55,008 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:412110x0, quorum=127.0.0.1:58278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:32:55,008 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41211-0x10133a62b230001 connected 2024-11-20T04:32:55,008 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T04:32:55,009 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T04:32:55,009 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T04:32:55,010 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T04:32:55,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41211 2024-11-20T04:32:55,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41211 2024-11-20T04:32:55,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41211 2024-11-20T04:32:55,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41211 2024-11-20T04:32:55,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41211 2024-11-20T04:32:55,023 
DEBUG [M:0;c2a32e16c274:34053 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c2a32e16c274:34053 2024-11-20T04:32:55,024 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c2a32e16c274,34053,1732077174946 2024-11-20T04:32:55,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:32:55,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:32:55,026 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c2a32e16c274,34053,1732077174946 2024-11-20T04:32:55,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T04:32:55,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:55,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:55,028 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T04:32:55,028 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c2a32e16c274,34053,1732077174946 from backup master directory 2024-11-20T04:32:55,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c2a32e16c274,34053,1732077174946 2024-11-20T04:32:55,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:32:55,031 WARN [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
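The records up to this point trace the usual mini-cluster bootstrap: an in-process ZooKeeper on 127.0.0.1:58278, a master bound to port 34053 and a regionserver bound to 41211. Below is a minimal sketch of the kind of test-harness code that produces such output, assuming the standard HBaseTestingUtil API; the actual test class behind this log is not visible here, so treat the class and flow as illustrative only.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;

// Minimal sketch, assuming the standard HBaseTestingUtil harness; the real test
// class that produced this log is not shown. startMiniCluster() brings up the
// in-process ZooKeeper, HDFS and HBase daemons whose startup is traced above.
public class MiniClusterBootstrapSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();            // MiniZooKeeperCluster + 1 master + 1 regionserver
    try {
      // ... exercise the cluster here ...
    } finally {
      util.shutdownMiniCluster();       // tears everything down, including ZooKeeper
    }
  }
}
```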
2024-11-20T04:32:55,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T04:32:55,031 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c2a32e16c274,34053,1732077174946 2024-11-20T04:32:55,034 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/hbase.id] with ID: 399328b2-13b5-40b9-bd62-0ed4523b2b5f 2024-11-20T04:32:55,034 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/.tmp/hbase.id 2024-11-20T04:32:55,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741826_1002 (size=42) 2024-11-20T04:32:55,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741826_1002 (size=42) 2024-11-20T04:32:55,041 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/.tmp/hbase.id]:[hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/hbase.id] 2024-11-20T04:32:55,051 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:32:55,051 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T04:32:55,052 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
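The two FSUtils records just above write the cluster ID to a temporary .tmp/hbase.id file and then move it to its final location. The sketch below illustrates that write-temp-then-rename idiom with the plain Hadoop FileSystem API; the helper name and error handling are illustrative, not the actual FSUtils implementation.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the write-to-temp-then-rename idiom used for hbase.id above.
public class ClusterIdFileSketch {
  static void writeClusterId(Configuration conf, Path rootDir, String clusterId) throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path dst = new Path(rootDir, "hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {   // write the id to a temporary file first
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {                             // then move it into place (atomic on HDFS)
      throw new IOException("Failed to rename " + tmp + " to " + dst);
    }
  }
}
```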
2024-11-20T04:32:55,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:55,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:55,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:32:55,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741827_1003 (size=196) 2024-11-20T04:32:55,061 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T04:32:55,062 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T04:32:55,062 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:32:55,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741828_1004 (size=1189) 2024-11-20T04:32:55,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741828_1004 (size=1189) 2024-11-20T04:32:55,069 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store 2024-11-20T04:32:55,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:32:55,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741829_1005 (size=34) 2024-11-20T04:32:55,074 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:32:55,074 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T04:32:55,074 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:32:55,074 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:32:55,074 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T04:32:55,074 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:32:55,074 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
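The descriptor printed for 'master:store' above (column families info, proc, rs, state) maps directly onto the public table-descriptor builder API. The sketch below rebuilds just the 'info' and 'proc' families with the attributes shown in the log; it is an illustration of the builder API, not the code MasterRegion actually runs.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of the 'info' and 'proc' families from the logged master:store descriptor.
public class MasterStoreDescriptorSketch {
  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                   // VERSIONS => '3'
        .setInMemory(true)                                   // IN_MEMORY => 'true'
        .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)                                  // BLOCKSIZE => '8192 B (8KB)'
        .build())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)                             // BLOCKSIZE => '65536 B (64KB)'
        .build())
      .build();
  }
}
```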
2024-11-20T04:32:55,074 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732077175074Disabling compacts and flushes for region at 1732077175074Disabling writes for close at 1732077175074Writing region close event to WAL at 1732077175074Closed at 1732077175074 2024-11-20T04:32:55,075 WARN [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/.initializing 2024-11-20T04:32:55,075 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/WALs/c2a32e16c274,34053,1732077174946 2024-11-20T04:32:55,077 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C34053%2C1732077174946, suffix=, logDir=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/WALs/c2a32e16c274,34053,1732077174946, archiveDir=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/oldWALs, maxLogs=10 2024-11-20T04:32:55,078 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C34053%2C1732077174946.1732077175078 2024-11-20T04:32:55,082 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/WALs/c2a32e16c274,34053,1732077174946/c2a32e16c274%2C34053%2C1732077174946.1732077175078 2024-11-20T04:32:55,082 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43061:43061),(127.0.0.1/127.0.0.1:33137:33137)] 2024-11-20T04:32:55,083 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:32:55,083 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:32:55,083 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:32:55,083 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:32:55,084 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:32:55,085 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T04:32:55,085 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:55,086 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:32:55,086 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:32:55,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T04:32:55,087 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:55,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:32:55,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:32:55,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T04:32:55,088 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:55,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:32:55,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:32:55,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T04:32:55,089 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:55,090 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T04:32:55,090 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:32:55,091 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:32:55,091 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:32:55,092 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:32:55,092 DEBUG [master/c2a32e16c274:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:32:55,092 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T04:32:55,093 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T04:32:55,095 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:32:55,095 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=786731, jitterRate=3.8117170333862305E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T04:32:55,096 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732077175083Initializing all the Stores at 1732077175084 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077175084Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077175084Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077175084Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077175084Cleaning up temporary data from old regions at 1732077175092 (+8 ms)Region opened successfully at 1732077175096 (+4 ms) 2024-11-20T04:32:55,096 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T04:32:55,099 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e8dd55f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:32:55,100 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T04:32:55,100 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T04:32:55,100 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T04:32:55,100 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T04:32:55,100 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T04:32:55,101 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T04:32:55,101 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T04:32:55,103 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T04:32:55,104 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T04:32:55,105 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T04:32:55,106 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T04:32:55,106 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T04:32:55,107 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T04:32:55,107 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T04:32:55,108 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T04:32:55,114 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T04:32:55,115 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T04:32:55,116 DEBUG 
[master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T04:32:55,118 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T04:32:55,119 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T04:32:55,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T04:32:55,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T04:32:55,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:55,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:55,121 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c2a32e16c274,34053,1732077174946, sessionid=0x10133a62b230000, setting cluster-up flag (Was=false) 2024-11-20T04:32:55,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:55,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:55,131 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T04:32:55,131 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2a32e16c274,34053,1732077174946 2024-11-20T04:32:55,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:55,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:55,140 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T04:32:55,141 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2a32e16c274,34053,1732077174946 2024-11-20T04:32:55,142 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T04:32:55,144 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T04:32:55,144 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T04:32:55,144 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T04:32:55,144 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c2a32e16c274,34053,1732077174946 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T04:32:55,145 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:32:55,145 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:32:55,146 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:32:55,146 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c2a32e16c274:0, corePoolSize=5, maxPoolSize=5 2024-11-20T04:32:55,146 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c2a32e16c274:0, corePoolSize=10, maxPoolSize=10 2024-11-20T04:32:55,146 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:32:55,146 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:32:55,146 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c2a32e16c274:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T04:32:55,147 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:32:55,147 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T04:32:55,148 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732077205148 2024-11-20T04:32:55,148 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T04:32:55,148 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T04:32:55,148 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T04:32:55,148 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T04:32:55,148 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T04:32:55,148 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T04:32:55,148 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:55,148 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
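The "Chore ScheduledChore name=LogsCleaner, period=600000" line above is the ChoreService scheduling pattern HBase uses for periodic housekeeping. A minimal sketch of that pattern follows; ScheduledChore and ChoreService are HBase-internal classes, and the stopper stub and chore body here are placeholders rather than the real LogCleaner.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Illustrative sketch of the ScheduledChore/ChoreService pattern behind the
// LogsCleaner chore logged above. These are internal-facing classes.
public class ChoreSketch {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ScheduledChore chore = new ScheduledChore("LogsCleaner", stopper, 600000) {
      @Override protected void chore() {
        // periodic work goes here (the real LogCleaner deletes old WALs)
      }
    };
    ChoreService service = new ChoreService("sketch");
    service.scheduleChore(chore);   // runs chore() every 600000 ms until stopped
  }
}
```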
2024-11-20T04:32:55,148 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T04:32:55,149 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T04:32:55,149 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T04:32:55,149 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T04:32:55,149 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T04:32:55,149 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T04:32:55,150 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732077175149,5,FailOnTimeoutGroup] 2024-11-20T04:32:55,150 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732077175150,5,FailOnTimeoutGroup] 2024-11-20T04:32:55,150 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,150 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
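The log-cleaner and HFile-cleaner chains initialized above are pluggable: which cleaner classes run is driven by two comma-separated configuration lists. The snippet below simply restates the classes named in the log against those keys; it is illustrative, not a configuration this test requires.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: the cleaner chains logged above come from these plugin lists
// (class names copied from the log lines).
public class CleanerPluginsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
      + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner,"
      + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner");
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner,"
      + "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner,"
      + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
  }
}
```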
2024-11-20T04:32:55,150 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,150 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:32:55,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741831_1007 (size=1321) 2024-11-20T04:32:55,158 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T04:32:55,159 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3 2024-11-20T04:32:55,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741832_1008 (size=32) 2024-11-20T04:32:55,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741832_1008 (size=32) 2024-11-20T04:32:55,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:32:55,166 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T04:32:55,166 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T04:32:55,167 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:55,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:32:55,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T04:32:55,168 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T04:32:55,168 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:55,168 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:32:55,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T04:32:55,169 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; 
major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T04:32:55,169 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:55,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:32:55,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T04:32:55,171 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T04:32:55,171 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:55,171 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:32:55,171 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T04:32:55,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/data/hbase/meta/1588230740 2024-11-20T04:32:55,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/data/hbase/meta/1588230740 2024-11-20T04:32:55,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T04:32:55,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T04:32:55,173 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
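The FlushLargeStoresPolicy line above (and the matching one for master:store earlier) falls back to dividing the region's memstore flush heap size evenly across its column families when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table. The arithmetic for the two regions in this log is sketched below; the 64 MB flush size for hbase:meta is inferred from the logged 16 MB result and should be treated as an assumption.

```java
// Sketch of the fallback used by FlushLargeStoresPolicy above:
//   lowerBound = memstoreFlushHeapSize / numberOfColumnFamilies
public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long masterStore = 134_217_728L / 4;  // 128 MB flush size, 4 families -> 33554432 (the logged "32.0 M")
    long meta        =  67_108_864L / 4;  // assumed 64 MB flush size, 4 families -> 16777216 (the logged "16.0 M")
    System.out.println(masterStore + " " + meta);
  }
}
```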
2024-11-20T04:32:55,174 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T04:32:55,175 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T04:32:55,175 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=879669, jitterRate=0.11855736374855042}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T04:32:55,176 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732077175165Initializing all the Stores at 1732077175165Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077175165Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077175165Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077175165Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077175165Cleaning up temporary data from old regions at 1732077175173 (+8 ms)Region opened successfully at 1732077175176 (+3 ms) 2024-11-20T04:32:55,176 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T04:32:55,176 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T04:32:55,176 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T04:32:55,176 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T04:32:55,176 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T04:32:55,177 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T04:32:55,177 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732077175176Disabling compacts and flushes for region at 1732077175176Disabling writes for close at 1732077175176Writing region close event to 
WAL at 1732077175177 (+1 ms)Closed at 1732077175177 2024-11-20T04:32:55,178 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:32:55,178 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T04:32:55,178 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T04:32:55,179 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T04:32:55,180 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T04:32:55,213 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(746): ClusterId : 399328b2-13b5-40b9-bd62-0ed4523b2b5f 2024-11-20T04:32:55,213 DEBUG [RS:0;c2a32e16c274:41211 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T04:32:55,215 DEBUG [RS:0;c2a32e16c274:41211 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T04:32:55,215 DEBUG [RS:0;c2a32e16c274:41211 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T04:32:55,217 DEBUG [RS:0;c2a32e16c274:41211 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T04:32:55,217 DEBUG [RS:0;c2a32e16c274:41211 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a7dd909, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2a32e16c274/172.17.0.2:0 2024-11-20T04:32:55,229 DEBUG [RS:0;c2a32e16c274:41211 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c2a32e16c274:41211 2024-11-20T04:32:55,229 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T04:32:55,229 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T04:32:55,229 DEBUG [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-20T04:32:55,230 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(2659): reportForDuty to master=c2a32e16c274,34053,1732077174946 with port=41211, startcode=1732077175000 2024-11-20T04:32:55,230 DEBUG [RS:0;c2a32e16c274:41211 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T04:32:55,233 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55819, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T04:32:55,233 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34053 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c2a32e16c274,41211,1732077175000 2024-11-20T04:32:55,233 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34053 {}] master.ServerManager(517): Registering regionserver=c2a32e16c274,41211,1732077175000 2024-11-20T04:32:55,234 DEBUG [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3 2024-11-20T04:32:55,234 DEBUG [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33535 2024-11-20T04:32:55,234 DEBUG [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T04:32:55,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:32:55,237 DEBUG [RS:0;c2a32e16c274:41211 {}] zookeeper.ZKUtil(111): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c2a32e16c274,41211,1732077175000 2024-11-20T04:32:55,237 WARN [RS:0;c2a32e16c274:41211 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T04:32:55,237 INFO [RS:0;c2a32e16c274:41211 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:32:55,237 DEBUG [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/WALs/c2a32e16c274,41211,1732077175000 2024-11-20T04:32:55,237 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c2a32e16c274,41211,1732077175000] 2024-11-20T04:32:55,240 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T04:32:55,242 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T04:32:55,242 INFO [RS:0;c2a32e16c274:41211 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T04:32:55,242 INFO [RS:0;c2a32e16c274:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-20T04:32:55,242 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T04:32:55,243 INFO [RS:0;c2a32e16c274:41211 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T04:32:55,243 INFO [RS:0;c2a32e16c274:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,243 DEBUG [RS:0;c2a32e16c274:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:32:55,243 DEBUG [RS:0;c2a32e16c274:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:32:55,243 DEBUG [RS:0;c2a32e16c274:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:32:55,243 DEBUG [RS:0;c2a32e16c274:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:32:55,243 DEBUG [RS:0;c2a32e16c274:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:32:55,243 DEBUG [RS:0;c2a32e16c274:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c2a32e16c274:0, corePoolSize=2, maxPoolSize=2 2024-11-20T04:32:55,243 DEBUG [RS:0;c2a32e16c274:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:32:55,243 DEBUG [RS:0;c2a32e16c274:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:32:55,243 DEBUG [RS:0;c2a32e16c274:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:32:55,243 DEBUG [RS:0;c2a32e16c274:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:32:55,243 DEBUG [RS:0;c2a32e16c274:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:32:55,243 DEBUG [RS:0;c2a32e16c274:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c2a32e16c274:0, corePoolSize=1, maxPoolSize=1 2024-11-20T04:32:55,243 DEBUG [RS:0;c2a32e16c274:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:32:55,243 DEBUG [RS:0;c2a32e16c274:41211 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c2a32e16c274:0, corePoolSize=3, maxPoolSize=3 2024-11-20T04:32:55,244 INFO [RS:0;c2a32e16c274:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-20T04:32:55,244 INFO [RS:0;c2a32e16c274:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,244 INFO [RS:0;c2a32e16c274:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,244 INFO [RS:0;c2a32e16c274:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,244 INFO [RS:0;c2a32e16c274:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,244 INFO [RS:0;c2a32e16c274:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,41211,1732077175000-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T04:32:55,259 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T04:32:55,259 INFO [RS:0;c2a32e16c274:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,41211,1732077175000-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,259 INFO [RS:0;c2a32e16c274:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,259 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.Replication(171): c2a32e16c274,41211,1732077175000 started 2024-11-20T04:32:55,273 INFO [RS:0;c2a32e16c274:41211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,273 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(1482): Serving as c2a32e16c274,41211,1732077175000, RpcServer on c2a32e16c274/172.17.0.2:41211, sessionid=0x10133a62b230001 2024-11-20T04:32:55,273 DEBUG [RS:0;c2a32e16c274:41211 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T04:32:55,273 DEBUG [RS:0;c2a32e16c274:41211 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c2a32e16c274,41211,1732077175000 2024-11-20T04:32:55,273 DEBUG [RS:0;c2a32e16c274:41211 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,41211,1732077175000' 2024-11-20T04:32:55,273 DEBUG [RS:0;c2a32e16c274:41211 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T04:32:55,274 DEBUG [RS:0;c2a32e16c274:41211 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T04:32:55,274 DEBUG [RS:0;c2a32e16c274:41211 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T04:32:55,274 DEBUG [RS:0;c2a32e16c274:41211 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T04:32:55,274 DEBUG [RS:0;c2a32e16c274:41211 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c2a32e16c274,41211,1732077175000 2024-11-20T04:32:55,274 DEBUG [RS:0;c2a32e16c274:41211 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2a32e16c274,41211,1732077175000' 2024-11-20T04:32:55,274 DEBUG [RS:0;c2a32e16c274:41211 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T04:32:55,274 DEBUG 
[RS:0;c2a32e16c274:41211 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-20T04:32:55,275 DEBUG [RS:0;c2a32e16c274:41211 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-20T04:32:55,275 INFO [RS:0;c2a32e16c274:41211 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-20T04:32:55,275 INFO [RS:0;c2a32e16c274:41211 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-20T04:32:55,330 WARN [c2a32e16c274:34053 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions.
2024-11-20T04:32:55,377 INFO [RS:0;c2a32e16c274:41211 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C41211%2C1732077175000, suffix=, logDir=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/WALs/c2a32e16c274,41211,1732077175000, archiveDir=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/oldWALs, maxLogs=32
2024-11-20T04:32:55,377 INFO [RS:0;c2a32e16c274:41211 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C41211%2C1732077175000.1732077175377
2024-11-20T04:32:55,382 INFO [RS:0;c2a32e16c274:41211 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/WALs/c2a32e16c274,41211,1732077175000/c2a32e16c274%2C41211%2C1732077175000.1732077175377
2024-11-20T04:32:55,383 DEBUG [RS:0;c2a32e16c274:41211 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43061:43061),(127.0.0.1/127.0.0.1:33137:33137)]
2024-11-20T04:32:55,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,40241,1732076972119/c2a32e16c274%2C40241%2C1732076972119.meta.1732076973144.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T04:32:55,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40051/user/jenkins/test-data/bc517afe-0775-8d00-a8b8-609b966bf094/WALs/c2a32e16c274,41341,1732076973295/c2a32e16c274%2C41341%2C1732076973295.1732076973502
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-20T04:32:55,580 DEBUG [c2a32e16c274:34053 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T04:32:55,581 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c2a32e16c274,41211,1732077175000 2024-11-20T04:32:55,582 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2a32e16c274,41211,1732077175000, state=OPENING 2024-11-20T04:32:55,584 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T04:32:55,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:55,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:55,586 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T04:32:55,586 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:32:55,586 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:32:55,586 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c2a32e16c274,41211,1732077175000}] 2024-11-20T04:32:55,739 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T04:32:55,740 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48039, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T04:32:55,744 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T04:32:55,744 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:32:55,745 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2a32e16c274%2C41211%2C1732077175000.meta, suffix=.meta, logDir=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/WALs/c2a32e16c274,41211,1732077175000, archiveDir=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/oldWALs, maxLogs=32 2024-11-20T04:32:55,746 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c2a32e16c274%2C41211%2C1732077175000.meta.1732077175746.meta 2024-11-20T04:32:55,753 INFO 
[RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/WALs/c2a32e16c274,41211,1732077175000/c2a32e16c274%2C41211%2C1732077175000.meta.1732077175746.meta 2024-11-20T04:32:55,760 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43061:43061),(127.0.0.1/127.0.0.1:33137:33137)] 2024-11-20T04:32:55,761 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T04:32:55,761 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T04:32:55,761 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T04:32:55,761 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T04:32:55,761 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T04:32:55,761 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T04:32:55,762 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T04:32:55,762 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T04:32:55,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T04:32:55,764 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T04:32:55,764 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:55,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:32:55,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T04:32:55,765 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T04:32:55,765 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:55,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:32:55,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T04:32:55,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T04:32:55,766 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:55,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:32:55,766 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T04:32:55,767 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T04:32:55,767 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T04:32:55,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T04:32:55,767 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T04:32:55,768 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/data/hbase/meta/1588230740 2024-11-20T04:32:55,769 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/data/hbase/meta/1588230740 2024-11-20T04:32:55,769 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T04:32:55,770 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T04:32:55,770 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T04:32:55,771 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T04:32:55,772 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=829789, jitterRate=0.05513164401054382}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T04:32:55,772 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T04:32:55,772 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732077175762Writing region info on filesystem at 1732077175762Initializing all the Stores at 1732077175762Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077175762Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077175763 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732077175763Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732077175763Cleaning up temporary data from old regions at 1732077175770 (+7 ms)Running coprocessor post-open hooks at 1732077175772 (+2 ms)Region opened successfully at 1732077175772 2024-11-20T04:32:55,773 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732077175738 2024-11-20T04:32:55,775 DEBUG [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T04:32:55,775 INFO [RS_OPEN_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T04:32:55,776 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=c2a32e16c274,41211,1732077175000 2024-11-20T04:32:55,776 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2a32e16c274,41211,1732077175000, state=OPEN 2024-11-20T04:32:55,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T04:32:55,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T04:32:55,783 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c2a32e16c274,41211,1732077175000 2024-11-20T04:32:55,783 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:32:55,783 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T04:32:55,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T04:32:55,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c2a32e16c274,41211,1732077175000 in 197 msec 2024-11-20T04:32:55,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T04:32:55,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 607 msec 2024-11-20T04:32:55,787 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T04:32:55,787 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T04:32:55,788 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T04:32:55,788 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2a32e16c274,41211,1732077175000, seqNum=-1] 2024-11-20T04:32:55,788 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T04:32:55,790 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50061, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T04:32:55,793 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 649 msec 2024-11-20T04:32:55,793 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732077175793, completionTime=-1 2024-11-20T04:32:55,793 INFO 
[master/c2a32e16c274:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T04:32:55,793 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-20T04:32:55,795 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-20T04:32:55,795 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732077235795 2024-11-20T04:32:55,795 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732077295795 2024-11-20T04:32:55,795 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-20T04:32:55,796 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,34053,1732077174946-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,796 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,34053,1732077174946-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,796 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,34053,1732077174946-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,796 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c2a32e16c274:34053, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,796 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,796 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,797 DEBUG [master/c2a32e16c274:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T04:32:55,799 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.768sec 2024-11-20T04:32:55,799 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T04:32:55,799 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T04:32:55,799 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T04:32:55,799 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-20T04:32:55,799 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T04:32:55,799 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,34053,1732077174946-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T04:32:55,799 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,34053,1732077174946-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T04:32:55,801 DEBUG [master/c2a32e16c274:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T04:32:55,801 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T04:32:55,801 INFO [master/c2a32e16c274:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2a32e16c274,34053,1732077174946-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T04:32:55,813 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1271622e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:32:55,813 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c2a32e16c274,34053,-1 for getting cluster id 2024-11-20T04:32:55,814 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T04:32:55,815 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '399328b2-13b5-40b9-bd62-0ed4523b2b5f' 2024-11-20T04:32:55,815 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T04:32:55,815 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "399328b2-13b5-40b9-bd62-0ed4523b2b5f" 2024-11-20T04:32:55,816 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45806f01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:32:55,816 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c2a32e16c274,34053,-1] 2024-11-20T04:32:55,816 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T04:32:55,816 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:32:55,817 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53028, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T04:32:55,818 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61cbf6ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T04:32:55,818 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T04:32:55,819 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2a32e16c274,41211,1732077175000, seqNum=-1] 2024-11-20T04:32:55,819 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T04:32:55,820 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43822, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T04:32:55,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c2a32e16c274,34053,1732077174946 2024-11-20T04:32:55,822 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T04:32:55,824 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T04:32:55,824 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T04:32:55,826 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/WALs/test.com,8080,1, archiveDir=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/oldWALs, maxLogs=32 2024-11-20T04:32:55,826 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732077175826 2024-11-20T04:32:55,831 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732077175826 2024-11-20T04:32:55,832 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43061:43061),(127.0.0.1/127.0.0.1:33137:33137)] 2024-11-20T04:32:55,836 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732077175836 2024-11-20T04:32:55,841 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:55,841 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:55,841 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:55,841 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:55,841 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:55,841 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732077175826 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732077175836 2024-11-20T04:32:55,842 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33137:33137),(127.0.0.1/127.0.0.1:43061:43061)] 2024-11-20T04:32:55,842 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732077175826 is not closed yet, will try archiving it next time 2024-11-20T04:32:55,843 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:55,843 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:55,843 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:55,843 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:55,843 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:55,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741835_1011 (size=93) 2024-11-20T04:32:55,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741835_1011 (size=93) 2024-11-20T04:32:55,844 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732077175826 to hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/oldWALs/test.com%2C8080%2C1.1732077175826 2024-11-20T04:32:55,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741836_1012 (size=93) 2024-11-20T04:32:55,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741836_1012 (size=93) 2024-11-20T04:32:55,847 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/oldWALs 2024-11-20T04:32:55,847 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732077175836) 2024-11-20T04:32:55,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T04:32:55,848 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T04:32:55,848 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-20T04:32:55,848 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T04:32:55,848 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T04:32:55,848 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-20T04:32:55,848 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T04:32:55,848 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1054820621, stopped=false 2024-11-20T04:32:55,848 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c2a32e16c274,34053,1732077174946 2024-11-20T04:32:55,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:32:55,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:55,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T04:32:55,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:55,850 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T04:32:55,850 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T04:32:55,850 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:32:55,850 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:32:55,850 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c2a32e16c274,41211,1732077175000' ***** 2024-11-20T04:32:55,850 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T04:32:55,850 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:32:55,850 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T04:32:55,851 INFO [RS:0;c2a32e16c274:41211 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T04:32:55,851 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T04:32:55,851 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T04:32:55,851 INFO [RS:0;c2a32e16c274:41211 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T04:32:55,851 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(959): stopping server c2a32e16c274,41211,1732077175000 2024-11-20T04:32:55,851 INFO [RS:0;c2a32e16c274:41211 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:32:55,851 INFO [RS:0;c2a32e16c274:41211 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c2a32e16c274:41211. 
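The JUnit frames above (AbstractTestLogRolling.tearDown → HBaseTestingUtil.shutdownMiniCluster) correspond to a teardown of roughly the following shape. This is an illustrative sketch only, not the actual AbstractTestLogRolling code; the class and field names are assumptions:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
  // Hypothetical utility instance; the real test class wires this up in its own setup.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts mini ZooKeeper, mini DFS and a single master/regionserver.
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Produces the shutdown sequence in this log: connections closed, regions closed,
    // WALs archived, master and regionserver stopped, datanodes and ZK shut down.
    testUtil.shutdownMiniCluster();
  }
}
```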
2024-11-20T04:32:55,851 DEBUG [RS:0;c2a32e16c274:41211 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T04:32:55,851 DEBUG [RS:0;c2a32e16c274:41211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:32:55,851 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T04:32:55,851 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T04:32:55,851 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-20T04:32:55,851 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T04:32:55,851 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-20T04:32:55,851 DEBUG [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-20T04:32:55,851 DEBUG [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-20T04:32:55,851 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T04:32:55,851 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T04:32:55,851 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T04:32:55,852 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T04:32:55,852 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T04:32:55,852 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-20T04:32:55,867 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/data/hbase/meta/1588230740/.tmp/ns/180a4920edab49e7af7a863bb46e670d is 43, key is default/ns:d/1732077175790/Put/seqid=0 2024-11-20T04:32:55,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741837_1013 (size=5153) 2024-11-20T04:32:55,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741837_1013 (size=5153) 2024-11-20T04:32:55,872 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/data/hbase/meta/1588230740/.tmp/ns/180a4920edab49e7af7a863bb46e670d 2024-11-20T04:32:55,877 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/data/hbase/meta/1588230740/.tmp/ns/180a4920edab49e7af7a863bb46e670d as hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/data/hbase/meta/1588230740/ns/180a4920edab49e7af7a863bb46e670d 2024-11-20T04:32:55,880 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/data/hbase/meta/1588230740/ns/180a4920edab49e7af7a863bb46e670d, entries=2, sequenceid=6, filesize=5.0 K 2024-11-20T04:32:55,881 INFO 
[RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false 2024-11-20T04:32:55,884 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T04:32:55,885 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T04:32:55,885 INFO [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T04:32:55,885 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732077175851Running coprocessor pre-close hooks at 1732077175851Disabling compacts and flushes for region at 1732077175851Disabling writes for close at 1732077175852 (+1 ms)Obtaining lock to block concurrent updates at 1732077175852Preparing flush snapshotting stores in 1588230740 at 1732077175852Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732077175852Flushing stores of hbase:meta,,1.1588230740 at 1732077175853 (+1 ms)Flushing 1588230740/ns: creating writer at 1732077175853Flushing 1588230740/ns: appending metadata at 1732077175867 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732077175867Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@53fa172: reopening flushed file at 1732077175876 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false at 1732077175881 (+5 ms)Writing region close event to WAL at 1732077175882 (+1 ms)Running coprocessor post-close hooks at 1732077175885 (+3 ms)Closed at 1732077175885 2024-11-20T04:32:55,885 DEBUG [RS_CLOSE_META-regionserver/c2a32e16c274:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T04:32:56,052 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(976): stopping server c2a32e16c274,41211,1732077175000; all regions closed. 
2024-11-20T04:32:56,052 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,052 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,052 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,052 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,052 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741834_1010 (size=1152) 2024-11-20T04:32:56,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741834_1010 (size=1152) 2024-11-20T04:32:56,057 DEBUG [RS:0;c2a32e16c274:41211 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/oldWALs 2024-11-20T04:32:56,057 INFO [RS:0;c2a32e16c274:41211 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C41211%2C1732077175000.meta:.meta(num 1732077175746) 2024-11-20T04:32:56,057 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,057 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,057 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,057 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,057 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741833_1009 (size=93) 2024-11-20T04:32:56,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741833_1009 (size=93) 2024-11-20T04:32:56,061 DEBUG [RS:0;c2a32e16c274:41211 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/oldWALs 2024-11-20T04:32:56,061 INFO [RS:0;c2a32e16c274:41211 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c2a32e16c274%2C41211%2C1732077175000:(num 1732077175377) 2024-11-20T04:32:56,061 DEBUG [RS:0;c2a32e16c274:41211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T04:32:56,061 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T04:32:56,061 INFO [RS:0;c2a32e16c274:41211 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:32:56,061 INFO [RS:0;c2a32e16c274:41211 {}] hbase.ChoreService(370): Chore service for: regionserver/c2a32e16c274:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T04:32:56,062 INFO [RS:0;c2a32e16c274:41211 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:32:56,062 INFO [regionserver/c2a32e16c274:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
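The "Moved 1 WAL file(s) to .../oldWALs" and "Closed WAL: FSHLog ..." lines above show the region server's WALs being closed and archived during shutdown. For reference, a client can also request a roll explicitly through the Admin API; the sketch below is a hedged illustration of that call path (connection setup is a placeholder), not part of this test:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Roll the WAL of every live region server; rolled-over files are eventually
      // moved into the oldWALs directory, as seen in the log above.
      for (ServerName sn : admin.getRegionServers()) {
        admin.rollWALWriter(sn);
      }
    }
  }
}
```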
2024-11-20T04:32:56,062 INFO [RS:0;c2a32e16c274:41211 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41211 2024-11-20T04:32:56,064 INFO [RS:0;c2a32e16c274:41211 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T04:32:56,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c2a32e16c274,41211,1732077175000 2024-11-20T04:32:56,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T04:32:56,067 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c2a32e16c274,41211,1732077175000] 2024-11-20T04:32:56,071 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c2a32e16c274,41211,1732077175000 already deleted, retry=false 2024-11-20T04:32:56,071 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c2a32e16c274,41211,1732077175000 expired; onlineServers=0 2024-11-20T04:32:56,071 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c2a32e16c274,34053,1732077174946' ***** 2024-11-20T04:32:56,071 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T04:32:56,071 INFO [M:0;c2a32e16c274:34053 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T04:32:56,071 INFO [M:0;c2a32e16c274:34053 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T04:32:56,071 DEBUG [M:0;c2a32e16c274:34053 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T04:32:56,072 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T04:32:56,072 DEBUG [M:0;c2a32e16c274:34053 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T04:32:56,072 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732077175150 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.small.0-1732077175150,5,FailOnTimeoutGroup] 2024-11-20T04:32:56,072 DEBUG [master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732077175149 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2a32e16c274:0:becomeActiveMaster-HFileCleaner.large.0-1732077175149,5,FailOnTimeoutGroup] 2024-11-20T04:32:56,072 INFO [M:0;c2a32e16c274:34053 {}] hbase.ChoreService(370): Chore service for: master/c2a32e16c274:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T04:32:56,072 INFO [M:0;c2a32e16c274:34053 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T04:32:56,072 DEBUG [M:0;c2a32e16c274:34053 {}] master.HMaster(1795): Stopping service threads 2024-11-20T04:32:56,072 INFO [M:0;c2a32e16c274:34053 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T04:32:56,072 INFO [M:0;c2a32e16c274:34053 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T04:32:56,072 INFO [M:0;c2a32e16c274:34053 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T04:32:56,072 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T04:32:56,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T04:32:56,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T04:32:56,080 DEBUG [M:0;c2a32e16c274:34053 {}] zookeeper.ZKUtil(347): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T04:32:56,080 WARN [M:0;c2a32e16c274:34053 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T04:32:56,080 INFO [M:0;c2a32e16c274:34053 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/.lastflushedseqids 2024-11-20T04:32:56,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741838_1014 (size=108) 2024-11-20T04:32:56,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741838_1014 (size=108) 2024-11-20T04:32:56,086 INFO [M:0;c2a32e16c274:34053 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T04:32:56,086 INFO [M:0;c2a32e16c274:34053 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T04:32:56,086 DEBUG [M:0;c2a32e16c274:34053 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T04:32:56,086 INFO [M:0;c2a32e16c274:34053 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:32:56,086 DEBUG [M:0;c2a32e16c274:34053 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:32:56,086 DEBUG [M:0;c2a32e16c274:34053 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T04:32:56,086 DEBUG [M:0;c2a32e16c274:34053 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T04:32:56,086 INFO [M:0;c2a32e16c274:34053 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-20T04:32:56,101 DEBUG [M:0;c2a32e16c274:34053 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e28b23ab49aa47978302ebac760a870e is 82, key is hbase:meta,,1/info:regioninfo/1732077175776/Put/seqid=0 2024-11-20T04:32:56,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741839_1015 (size=5672) 2024-11-20T04:32:56,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741839_1015 (size=5672) 2024-11-20T04:32:56,106 INFO [M:0;c2a32e16c274:34053 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e28b23ab49aa47978302ebac760a870e 2024-11-20T04:32:56,124 DEBUG [M:0;c2a32e16c274:34053 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d85457e33f62453b8062f00dd8252430 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732077175793/Put/seqid=0 2024-11-20T04:32:56,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741840_1016 (size=5275) 2024-11-20T04:32:56,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741840_1016 (size=5275) 2024-11-20T04:32:56,130 INFO [M:0;c2a32e16c274:34053 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d85457e33f62453b8062f00dd8252430 2024-11-20T04:32:56,149 DEBUG [M:0;c2a32e16c274:34053 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b49a6df525534039b520951161b20680 is 69, key is c2a32e16c274,41211,1732077175000/rs:state/1732077175233/Put/seqid=0 2024-11-20T04:32:56,153 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741841_1017 (size=5156) 2024-11-20T04:32:56,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741841_1017 (size=5156) 2024-11-20T04:32:56,154 INFO [M:0;c2a32e16c274:34053 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b49a6df525534039b520951161b20680 2024-11-20T04:32:56,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:32:56,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41211-0x10133a62b230001, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:32:56,167 INFO [RS:0;c2a32e16c274:41211 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T04:32:56,167 INFO [RS:0;c2a32e16c274:41211 {}] regionserver.HRegionServer(1031): Exiting; stopping=c2a32e16c274,41211,1732077175000; zookeeper connection closed. 2024-11-20T04:32:56,167 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3e23e78e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3e23e78e 2024-11-20T04:32:56,168 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T04:32:56,172 DEBUG [M:0;c2a32e16c274:34053 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8be23b7f7af74511ac33b205b41c4a94 is 52, key is load_balancer_on/state:d/1732077175823/Put/seqid=0 2024-11-20T04:32:56,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741842_1018 (size=5056) 2024-11-20T04:32:56,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741842_1018 (size=5056) 2024-11-20T04:32:56,177 INFO [M:0;c2a32e16c274:34053 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8be23b7f7af74511ac33b205b41c4a94 2024-11-20T04:32:56,181 DEBUG [M:0;c2a32e16c274:34053 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e28b23ab49aa47978302ebac760a870e as hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e28b23ab49aa47978302ebac760a870e 2024-11-20T04:32:56,185 INFO [M:0;c2a32e16c274:34053 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e28b23ab49aa47978302ebac760a870e, entries=8, sequenceid=29, filesize=5.5 K 2024-11-20T04:32:56,186 DEBUG [M:0;c2a32e16c274:34053 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d85457e33f62453b8062f00dd8252430 as hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d85457e33f62453b8062f00dd8252430 2024-11-20T04:32:56,189 INFO [M:0;c2a32e16c274:34053 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d85457e33f62453b8062f00dd8252430, entries=3, sequenceid=29, filesize=5.2 K 2024-11-20T04:32:56,190 DEBUG [M:0;c2a32e16c274:34053 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b49a6df525534039b520951161b20680 as hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b49a6df525534039b520951161b20680 2024-11-20T04:32:56,193 INFO [M:0;c2a32e16c274:34053 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b49a6df525534039b520951161b20680, entries=1, sequenceid=29, filesize=5.0 K 2024-11-20T04:32:56,194 DEBUG [M:0;c2a32e16c274:34053 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8be23b7f7af74511ac33b205b41c4a94 as hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8be23b7f7af74511ac33b205b41c4a94 2024-11-20T04:32:56,197 INFO [M:0;c2a32e16c274:34053 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33535/user/jenkins/test-data/727a25ec-7b75-e6f9-4dc9-74a29e20abf3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8be23b7f7af74511ac33b205b41c4a94, entries=1, sequenceid=29, filesize=4.9 K 2024-11-20T04:32:56,198 INFO [M:0;c2a32e16c274:34053 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=29, compaction requested=false 2024-11-20T04:32:56,200 INFO [M:0;c2a32e16c274:34053 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T04:32:56,200 DEBUG [M:0;c2a32e16c274:34053 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732077176086Disabling compacts and flushes for region at 1732077176086Disabling writes for close at 1732077176086Obtaining lock to block concurrent updates at 1732077176086Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732077176086Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732077176087 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732077176087Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732077176087Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732077176101 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732077176101Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732077176110 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732077176124 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732077176124Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732077176134 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732077176148 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732077176148Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732077176158 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732077176172 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732077176172Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1827890a: reopening flushed file at 1732077176181 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76080fe7: reopening flushed file at 1732077176185 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1edf2464: reopening flushed file at 1732077176189 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47c0e53c: reopening flushed file at 1732077176193 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=29, compaction requested=false at 1732077176198 (+5 ms)Writing region close event to WAL at 1732077176200 (+2 ms)Closed at 1732077176200 2024-11-20T04:32:56,200 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,200 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,200 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,200 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,200 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T04:32:56,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42479 is added to blk_1073741830_1006 (size=10311) 2024-11-20T04:32:56,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34551 is added to blk_1073741830_1006 (size=10311) 2024-11-20T04:32:56,203 INFO [M:0;c2a32e16c274:34053 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-20T04:32:56,203 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T04:32:56,203 INFO [M:0;c2a32e16c274:34053 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34053 2024-11-20T04:32:56,203 INFO [M:0;c2a32e16c274:34053 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T04:32:56,305 INFO [M:0;c2a32e16c274:34053 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T04:32:56,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:32:56,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34053-0x10133a62b230000, quorum=127.0.0.1:58278, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T04:32:56,307 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64440bf6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:32:56,308 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2551ca75{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:32:56,308 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:32:56,308 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f638c2a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:32:56,308 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2fb7753b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/hadoop.log.dir/,STOPPED} 2024-11-20T04:32:56,309 WARN [BP-802853067-172.17.0.2-1732077174301 heartbeating to localhost/127.0.0.1:33535 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:32:56,309 WARN [BP-802853067-172.17.0.2-1732077174301 heartbeating to localhost/127.0.0.1:33535 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-802853067-172.17.0.2-1732077174301 (Datanode Uuid 89e707ec-ddbc-4b56-8051-76684a93c5a9) service to localhost/127.0.0.1:33535 2024-11-20T04:32:56,309 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T04:32:56,309 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:32:56,310 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/cluster_ab6990b0-75b2-6dad-fdc7-a832b9c481bd/data/data3/current/BP-802853067-172.17.0.2-1732077174301 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:32:56,310 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/cluster_ab6990b0-75b2-6dad-fdc7-a832b9c481bd/data/data4/current/BP-802853067-172.17.0.2-1732077174301 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:32:56,310 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:32:56,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@413a6699{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T04:32:56,312 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1cfa6b2d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:32:56,312 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:32:56,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2578bc63{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:32:56,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e818af2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/hadoop.log.dir/,STOPPED} 2024-11-20T04:32:56,313 WARN [BP-802853067-172.17.0.2-1732077174301 heartbeating to localhost/127.0.0.1:33535 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T04:32:56,313 WARN [BP-802853067-172.17.0.2-1732077174301 heartbeating to localhost/127.0.0.1:33535 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-802853067-172.17.0.2-1732077174301 (Datanode Uuid bcd3f976-15e1-4adb-823e-fb7f0c4b2004) service to localhost/127.0.0.1:33535 2024-11-20T04:32:56,313 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T04:32:56,313 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T04:32:56,314 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/cluster_ab6990b0-75b2-6dad-fdc7-a832b9c481bd/data/data1/current/BP-802853067-172.17.0.2-1732077174301 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:32:56,314 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/cluster_ab6990b0-75b2-6dad-fdc7-a832b9c481bd/data/data2/current/BP-802853067-172.17.0.2-1732077174301 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T04:32:56,314 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T04:32:56,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1926aa54{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T04:32:56,320 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@18478920{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T04:32:56,320 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T04:32:56,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78047c32{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T04:32:56,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d7cc0b1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/823737e4-dde1-c795-7c0e-6fbc9389ed2d/hadoop.log.dir/,STOPPED} 2024-11-20T04:32:56,326 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T04:32:56,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T04:32:56,352 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 226) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33535 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) 
app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33535 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:33535 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33535 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) 
Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33535 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33535 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33535 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33535 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=18 (was 20), ProcessCount=11 (was 11), AvailableMemoryMB=7065 (was 7065)
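The closing ResourceChecker summary compares thread, file-descriptor, load and memory figures before and after the test (e.g. Thread=267 (was 226), OpenFileDescriptor=538 (was 515)); a rise between "was" and the current value is what triggers the "Thread LEAK?" flag and the potentially-hanging-thread dump above. HBase's checker is internal to its test framework, but the same kind of before/after snapshot can be taken with standard JMX beans; the sketch below only illustrates that idea and is not the ResourceChecker implementation:

```java
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import com.sun.management.UnixOperatingSystemMXBean;

public class ResourceSnapshotSketch {
  public static void main(String[] args) {
    // Live JVM threads, comparable to the "Thread=..." figure in the summary above.
    int threads = ManagementFactory.getThreadMXBean().getThreadCount();

    // Open file descriptors (Unix only), comparable to "OpenFileDescriptor=...".
    OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
    long fds = (os instanceof UnixOperatingSystemMXBean)
        ? ((UnixOperatingSystemMXBean) os).getOpenFileDescriptorCount() : -1;

    // One-minute system load average, comparable to "SystemLoadAverage=...".
    double load = os.getSystemLoadAverage();

    System.out.println("Thread=" + threads + ", OpenFileDescriptor=" + fds
        + ", SystemLoadAverage=" + load);
  }
}
```

Taking one such snapshot before the test and one after, then printing "current (was previous)", reproduces the shape of the summary line above.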