2024-11-16 11:33:35,450 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-16 11:33:35,461 main DEBUG Took 0.009029 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-16 11:33:35,461 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-16 11:33:35,462 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-16 11:33:35,462 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-16 11:33:35,463 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 11:33:35,470 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-16 11:33:35,482 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 11:33:35,484 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 11:33:35,485 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 11:33:35,485 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 11:33:35,486 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 11:33:35,486 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 11:33:35,487 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 11:33:35,488 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 11:33:35,488 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 11:33:35,489 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 11:33:35,490 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 11:33:35,490 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 11:33:35,491 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 11:33:35,491 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-16 11:33:35,492 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 11:33:35,492 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 11:33:35,493 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 11:33:35,494 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 11:33:35,494 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 11:33:35,495 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 11:33:35,495 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 11:33:35,495 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 11:33:35,496 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 11:33:35,496 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 11:33:35,497 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 11:33:35,497 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-16 11:33:35,499 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 11:33:35,501 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-16 11:33:35,503 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-16 11:33:35,504 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-16 11:33:35,505 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-16 11:33:35,506 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-16 11:33:35,513 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-16 11:33:35,515 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-16 11:33:35,517 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-16 11:33:35,518 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-16 11:33:35,518 main DEBUG createAppenders(={Console}) 2024-11-16 11:33:35,519 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-16 11:33:35,519 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-16 11:33:35,520 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-16 11:33:35,520 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-16 11:33:35,521 main DEBUG OutputStream closed 2024-11-16 11:33:35,521 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-16 11:33:35,521 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-16 11:33:35,522 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-16 11:33:35,603 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-16 11:33:35,605 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-16 11:33:35,606 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-16 11:33:35,608 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-16 11:33:35,609 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-16 11:33:35,609 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-16 11:33:35,610 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-16 11:33:35,610 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-16 11:33:35,611 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-16 11:33:35,611 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-16 11:33:35,612 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-16 11:33:35,612 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-16 11:33:35,612 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-16 11:33:35,613 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-16 11:33:35,613 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-16 11:33:35,614 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-16 11:33:35,614 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-16 11:33:35,615 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-16 11:33:35,618 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-16 11:33:35,618 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-16 11:33:35,618 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-16 11:33:35,619 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-16T11:33:35,871 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062 2024-11-16 11:33:35,874 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-16 11:33:35,874 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
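The DEBUG lines above show Log4j Core 2.17.2 assembling a PropertiesConfiguration from the log4j2.properties bundled in the hbase-logging test jar: one LoggerConfig per package, a PatternLayout, and a single Console appender referenced by the root logger as "INFO,Console". For orientation, a minimal log4j2.properties sketch that would produce an equivalent setup is given below; it is an illustration only, it assumes the stock Console appender rather than HBase's custom HBaseTestAppender, and the logger list is abbreviated to three of the packages seen above.

    # Hypothetical log4j2.properties sketch, not the project's actual file.
    # status=debug is what makes the internal "main DEBUG" bootstrap lines above appear.
    status = debug

    appender.console.type = Console
    appender.console.name = Console
    appender.console.target = SYSTEM_ERR
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

    # Matches the root logger's levelAndRefs="INFO,Console" seen above.
    rootLogger = INFO,Console

    # Abbreviated: only three of the per-package loggers built above.
    logger.hbase.name = org.apache.hadoop.hbase
    logger.hbase.level = DEBUG
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = WARN
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = ERROR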
2024-11-16T11:33:35,882 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-16T11:33:35,916 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=224, ProcessCount=11, AvailableMemoryMB=5445 2024-11-16T11:33:35,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T11:33:35,933 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/cluster_371b9d0d-aacc-fe33-fd67-ac63869a4143, deleteOnExit=true 2024-11-16T11:33:35,933 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T11:33:35,934 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/test.cache.data in system properties and HBase conf 2024-11-16T11:33:35,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T11:33:35,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/hadoop.log.dir in system properties and HBase conf 2024-11-16T11:33:35,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T11:33:35,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T11:33:35,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T11:33:36,016 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-16T11:33:36,096 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T11:33:36,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T11:33:36,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T11:33:36,102 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T11:33:36,102 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T11:33:36,103 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T11:33:36,103 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T11:33:36,104 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T11:33:36,104 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T11:33:36,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T11:33:36,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/nfs.dump.dir in system properties and HBase conf 2024-11-16T11:33:36,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/java.io.tmpdir in system properties and HBase conf 2024-11-16T11:33:36,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T11:33:36,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T11:33:36,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T11:33:36,564 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T11:33:37,098 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-16T11:33:37,166 INFO [Time-limited test {}] log.Log(170): Logging initialized @2387ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-16T11:33:37,231 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:33:37,290 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:33:37,308 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:33:37,308 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:33:37,309 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:33:37,319 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:33:37,322 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73b23f80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:33:37,323 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18d8eba1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:33:37,496 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@595f45d4{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/java.io.tmpdir/jetty-localhost-40103-hadoop-hdfs-3_4_1-tests_jar-_-any-3973174660417641023/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T11:33:37,502 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1298d5a2{HTTP/1.1, (http/1.1)}{localhost:40103} 2024-11-16T11:33:37,503 INFO [Time-limited test {}] server.Server(415): Started @2725ms 2024-11-16T11:33:37,525 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T11:33:38,156 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:33:38,164 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:33:38,165 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:33:38,165 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:33:38,165 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T11:33:38,166 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21c149f1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:33:38,167 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@616d254c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:33:38,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c4805bf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/java.io.tmpdir/jetty-localhost-33399-hadoop-hdfs-3_4_1-tests_jar-_-any-1815121873037739134/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:33:38,272 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@65cf0e3b{HTTP/1.1, (http/1.1)}{localhost:33399} 2024-11-16T11:33:38,272 INFO [Time-limited test {}] server.Server(415): Started @3494ms 2024-11-16T11:33:38,322 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:33:38,422 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:33:38,428 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:33:38,429 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:33:38,429 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:33:38,430 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:33:38,431 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f51668d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:33:38,431 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1612a852{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:33:38,540 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@50b8c1e0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/java.io.tmpdir/jetty-localhost-46827-hadoop-hdfs-3_4_1-tests_jar-_-any-2282217509150237245/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:33:38,541 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1225e5c2{HTTP/1.1, (http/1.1)}{localhost:46827} 2024-11-16T11:33:38,541 INFO [Time-limited test {}] server.Server(415): Started @3763ms 2024-11-16T11:33:38,544 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
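The entries from 11:33:35,916 onward show HBaseTestingUtil standing up the mini cluster for regionserver.wal.TestLogRolling#testSlowSyncLogRolling: StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}, a MiniDFS namenode plus two datanodes behind the Jetty servers logged above, and per-test data directories under target/test-data. The sketch below shows the usual JUnit 4 shape of such a test, assuming the HBaseTestingUtil, StartMiniClusterOption and HBaseClassTestRule classes named in the log; the class name and method bodies are hypothetical, not the actual TestLogRolling source.

    // Sketch only: typical HBase 3.x mini-cluster test setup under the assumptions above.
    import org.apache.hadoop.hbase.HBaseClassTestRule;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.ClassRule;

    public class ExampleMiniClusterTest {                  // hypothetical class name

      @ClassRule
      public static final HBaseClassTestRule CLASS_RULE =  // per-class timeout rule, as logged above
          HBaseClassTestRule.forClass(ExampleMiniClusterTest.class);

      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        UTIL.startMiniCluster(option);  // starts MiniDFS, MiniZooKeeperCluster, master and regionserver
      }

      @AfterClass
      public static void tearDown() throws Exception {
        UTIL.shutdownMiniCluster();     // stops the cluster; test data dirs were created with deleteOnExit=true
      }
    }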
2024-11-16T11:33:39,814 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/cluster_371b9d0d-aacc-fe33-fd67-ac63869a4143/data/data1/current/BP-412134530-172.17.0.2-1731756816644/current, will proceed with Du for space computation calculation, 2024-11-16T11:33:39,814 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/cluster_371b9d0d-aacc-fe33-fd67-ac63869a4143/data/data3/current/BP-412134530-172.17.0.2-1731756816644/current, will proceed with Du for space computation calculation, 2024-11-16T11:33:39,814 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/cluster_371b9d0d-aacc-fe33-fd67-ac63869a4143/data/data4/current/BP-412134530-172.17.0.2-1731756816644/current, will proceed with Du for space computation calculation, 2024-11-16T11:33:39,814 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/cluster_371b9d0d-aacc-fe33-fd67-ac63869a4143/data/data2/current/BP-412134530-172.17.0.2-1731756816644/current, will proceed with Du for space computation calculation, 2024-11-16T11:33:39,843 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T11:33:39,843 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:33:39,895 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x31f8d818e9595707 with lease ID 0xb90b6015f951c0d9: Processing first storage report for DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d from datanode DatanodeRegistration(127.0.0.1:39897, datanodeUuid=33cb1a64-49cf-4211-99dc-20553c8ca126, infoPort=45557, infoSecurePort=0, ipcPort=46301, storageInfo=lv=-57;cid=testClusterID;nsid=420216731;c=1731756816644) 2024-11-16T11:33:39,896 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x31f8d818e9595707 with lease ID 0xb90b6015f951c0d9: from storage DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d node DatanodeRegistration(127.0.0.1:39897, datanodeUuid=33cb1a64-49cf-4211-99dc-20553c8ca126, infoPort=45557, infoSecurePort=0, ipcPort=46301, storageInfo=lv=-57;cid=testClusterID;nsid=420216731;c=1731756816644), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T11:33:39,897 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x60b1cee03635aca3 with lease ID 0xb90b6015f951c0da: Processing first storage report for DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c from datanode DatanodeRegistration(127.0.0.1:37779, datanodeUuid=3665947b-5d27-45f3-879f-8ddcaf5550fc, infoPort=43143, infoSecurePort=0, ipcPort=45625, storageInfo=lv=-57;cid=testClusterID;nsid=420216731;c=1731756816644) 2024-11-16T11:33:39,897 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x60b1cee03635aca3 with lease ID 0xb90b6015f951c0da: from storage DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c node DatanodeRegistration(127.0.0.1:37779, datanodeUuid=3665947b-5d27-45f3-879f-8ddcaf5550fc, infoPort=43143, infoSecurePort=0, ipcPort=45625, storageInfo=lv=-57;cid=testClusterID;nsid=420216731;c=1731756816644), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T11:33:39,897 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x31f8d818e9595707 with lease ID 0xb90b6015f951c0d9: Processing first storage report for DS-e3d82691-dc6a-4958-95c7-2d2f9ed5a78c from datanode DatanodeRegistration(127.0.0.1:39897, datanodeUuid=33cb1a64-49cf-4211-99dc-20553c8ca126, infoPort=45557, infoSecurePort=0, ipcPort=46301, storageInfo=lv=-57;cid=testClusterID;nsid=420216731;c=1731756816644) 2024-11-16T11:33:39,897 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x31f8d818e9595707 with lease ID 0xb90b6015f951c0d9: from storage DS-e3d82691-dc6a-4958-95c7-2d2f9ed5a78c node DatanodeRegistration(127.0.0.1:39897, datanodeUuid=33cb1a64-49cf-4211-99dc-20553c8ca126, infoPort=45557, infoSecurePort=0, ipcPort=46301, storageInfo=lv=-57;cid=testClusterID;nsid=420216731;c=1731756816644), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:33:39,898 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x60b1cee03635aca3 with lease ID 0xb90b6015f951c0da: Processing first storage report for DS-108311a8-fcf9-4e97-825c-adec25215181 from datanode DatanodeRegistration(127.0.0.1:37779, datanodeUuid=3665947b-5d27-45f3-879f-8ddcaf5550fc, infoPort=43143, infoSecurePort=0, ipcPort=45625, storageInfo=lv=-57;cid=testClusterID;nsid=420216731;c=1731756816644) 2024-11-16T11:33:39,898 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x60b1cee03635aca3 with lease ID 0xb90b6015f951c0da: from storage DS-108311a8-fcf9-4e97-825c-adec25215181 node DatanodeRegistration(127.0.0.1:37779, datanodeUuid=3665947b-5d27-45f3-879f-8ddcaf5550fc, infoPort=43143, infoSecurePort=0, ipcPort=45625, storageInfo=lv=-57;cid=testClusterID;nsid=420216731;c=1731756816644), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:33:39,965 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062 2024-11-16T11:33:40,029 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/cluster_371b9d0d-aacc-fe33-fd67-ac63869a4143/zookeeper_0, clientPort=56083, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/cluster_371b9d0d-aacc-fe33-fd67-ac63869a4143/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/cluster_371b9d0d-aacc-fe33-fd67-ac63869a4143/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T11:33:40,038 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56083 2024-11-16T11:33:40,051 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:33:40,053 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:33:40,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741825_1001 (size=7) 2024-11-16T11:33:40,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741825_1001 (size=7) 2024-11-16T11:33:40,671 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062 with version=8 2024-11-16T11:33:40,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/hbase-staging 2024-11-16T11:33:40,753 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-16T11:33:40,943 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:33:40,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:33:40,952 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:33:40,956 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:33:40,956 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:33:40,956 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:33:41,085 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T11:33:41,141 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-16T11:33:41,151 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-16T11:33:41,155 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:33:41,179 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 102463 (auto-detected) 2024-11-16T11:33:41,181 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-16T11:33:41,199 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42855 2024-11-16T11:33:41,218 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42855 connecting to ZooKeeper ensemble=127.0.0.1:56083 2024-11-16T11:33:41,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:428550x0, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T11:33:41,344 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42855-0x101436c01d80000 connected 2024-11-16T11:33:41,454 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:33:41,457 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:33:41,467 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:33:41,470 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062, hbase.cluster.distributed=false 2024-11-16T11:33:41,491 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:33:41,495 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42855 2024-11-16T11:33:41,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42855 2024-11-16T11:33:41,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42855 2024-11-16T11:33:41,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42855 2024-11-16T11:33:41,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42855 2024-11-16T11:33:41,586 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:33:41,588 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:33:41,588 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:33:41,588 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:33:41,588 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:33:41,588 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:33:41,591 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T11:33:41,593 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:33:41,594 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43973 2024-11-16T11:33:41,596 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43973 connecting to ZooKeeper ensemble=127.0.0.1:56083 2024-11-16T11:33:41,597 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:33:41,602 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:33:41,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:439730x0, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T11:33:41,620 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:439730x0, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:33:41,620 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43973-0x101436c01d80001 connected 2024-11-16T11:33:41,623 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T11:33:41,630 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T11:33:41,632 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T11:33:41,637 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:33:41,638 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43973 2024-11-16T11:33:41,638 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43973 2024-11-16T11:33:41,639 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43973 2024-11-16T11:33:41,640 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43973 2024-11-16T11:33:41,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43973 2024-11-16T11:33:41,655 DEBUG [M:0;a7948fca2832:42855 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a7948fca2832:42855 2024-11-16T11:33:41,656 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a7948fca2832,42855,1731756820796 2024-11-16T11:33:41,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:33:41,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:33:41,674 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a7948fca2832,42855,1731756820796 2024-11-16T11:33:41,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:33:41,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T11:33:41,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-16T11:33:41,706 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T11:33:41,707 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a7948fca2832,42855,1731756820796 from backup master directory 2024-11-16T11:33:41,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a7948fca2832,42855,1731756820796 2024-11-16T11:33:41,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:33:41,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:33:41,716 WARN [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T11:33:41,716 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a7948fca2832,42855,1731756820796 2024-11-16T11:33:41,718 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-16T11:33:41,719 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-16T11:33:41,777 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/hbase.id] with ID: ad423bf2-a18b-426b-af99-a3bee82e0ab9 2024-11-16T11:33:41,777 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/.tmp/hbase.id 2024-11-16T11:33:41,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741826_1002 (size=42) 2024-11-16T11:33:41,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741826_1002 (size=42) 2024-11-16T11:33:41,790 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/.tmp/hbase.id]:[hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/hbase.id] 2024-11-16T11:33:41,836 INFO [master/a7948fca2832:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:33:41,842 INFO [master/a7948fca2832:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T11:33:41,861 INFO [master/a7948fca2832:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-11-16T11:33:41,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:33:41,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:33:41,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741827_1003 (size=196) 2024-11-16T11:33:41,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741827_1003 (size=196) 2024-11-16T11:33:41,904 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T11:33:41,906 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T11:33:41,911 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:33:41,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741828_1004 (size=1189) 2024-11-16T11:33:41,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741828_1004 (size=1189) 2024-11-16T11:33:41,957 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store 2024-11-16T11:33:41,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:33:41,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:33:41,982 INFO [master/a7948fca2832:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-16T11:33:41,986 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:33:41,987 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T11:33:41,987 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:33:41,987 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:33:41,989 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T11:33:41,989 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:33:41,989 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
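The descriptor printed above for 'master:store' (families info, proc, rs and state, with the info family kept IN_MEMORY at 3 versions, ROWCOL bloom filter, ROW_INDEX_V1 encoding and an 8 KB block size) is the master's local region used for procedures and region-server bookkeeping; it is built internally by region.MasterRegion, not by the test. Purely as a hedged illustration of how the logged 'info' family settings map onto the public client API, a minimal sketch follows (the class and method names are hypothetical):

    // Sketch only: expressing the logged 'info' family settings with the public
    // TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {             // hypothetical class name
      static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                             // VERSIONS => '3'
            .setInMemory(true)                             // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                        // BLOCKSIZE => '8192 B (8KB)'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)          // BLOOMFILTER => 'ROWCOL'
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))  // 'master:store'
            .setColumnFamily(info)
            .build();
      }
    }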
2024-11-16T11:33:41,991 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731756821987Disabling compacts and flushes for region at 1731756821987Disabling writes for close at 1731756821989 (+2 ms)Writing region close event to WAL at 1731756821989Closed at 1731756821989 2024-11-16T11:33:41,993 WARN [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/.initializing 2024-11-16T11:33:41,993 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/WALs/a7948fca2832,42855,1731756820796 2024-11-16T11:33:42,018 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C42855%2C1731756820796, suffix=, logDir=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/WALs/a7948fca2832,42855,1731756820796, archiveDir=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/oldWALs, maxLogs=10 2024-11-16T11:33:42,029 INFO [master/a7948fca2832:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C42855%2C1731756820796.1731756822024 2024-11-16T11:33:42,051 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/WALs/a7948fca2832,42855,1731756820796/a7948fca2832%2C42855%2C1731756820796.1731756822024 2024-11-16T11:33:42,062 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45557:45557),(127.0.0.1/127.0.0.1:43143:43143)] 2024-11-16T11:33:42,065 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:33:42,066 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:33:42,070 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:33:42,071 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:33:42,106 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:33:42,129 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T11:33:42,133 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:42,135 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:33:42,135 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:33:42,139 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T11:33:42,139 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:42,140 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:33:42,140 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:33:42,143 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T11:33:42,143 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:42,144 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:33:42,144 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:33:42,147 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T11:33:42,147 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:42,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:33:42,148 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:33:42,153 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:33:42,154 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:33:42,161 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:33:42,161 DEBUG [master/a7948fca2832:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:33:42,165 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T11:33:42,169 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:33:42,173 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:33:42,174 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710500, jitterRate=-0.09655363857746124}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T11:33:42,183 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731756822083Initializing all the Stores at 1731756822085 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756822086 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756822086Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756822087 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756822087Cleaning up temporary data from old regions at 1731756822162 (+75 ms)Region opened successfully at 1731756822183 (+21 ms) 2024-11-16T11:33:42,184 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T11:33:42,216 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d61174c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:33:42,241 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T11:33:42,250 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T11:33:42,250 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T11:33:42,253 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T11:33:42,254 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-16T11:33:42,260 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-16T11:33:42,260 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T11:33:42,292 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T11:33:42,303 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T11:33:42,362 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T11:33:42,366 INFO [master/a7948fca2832:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T11:33:42,369 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T11:33:42,378 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T11:33:42,380 INFO [master/a7948fca2832:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T11:33:42,385 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T11:33:42,398 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T11:33:42,400 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T11:33:42,410 
DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T11:33:42,432 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T11:33:42,441 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T11:33:42,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T11:33:42,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T11:33:42,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:33:42,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:33:42,456 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a7948fca2832,42855,1731756820796, sessionid=0x101436c01d80000, setting cluster-up flag (Was=false) 2024-11-16T11:33:42,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:33:42,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:33:42,536 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T11:33:42,539 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7948fca2832,42855,1731756820796 2024-11-16T11:33:42,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:33:42,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:33:42,862 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T11:33:42,865 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7948fca2832,42855,1731756820796 2024-11-16T11:33:42,871 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T11:33:42,943 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T11:33:42,946 INFO [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(746): ClusterId : ad423bf2-a18b-426b-af99-a3bee82e0ab9 2024-11-16T11:33:42,948 DEBUG [RS:0;a7948fca2832:43973 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T11:33:42,955 INFO [master/a7948fca2832:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T11:33:42,962 INFO [master/a7948fca2832:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T11:33:42,970 DEBUG [RS:0;a7948fca2832:43973 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T11:33:42,970 DEBUG [RS:0;a7948fca2832:43973 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T11:33:42,968 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a7948fca2832,42855,1731756820796 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T11:33:42,979 DEBUG [RS:0;a7948fca2832:43973 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T11:33:42,980 DEBUG [RS:0;a7948fca2832:43973 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c65ca5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:33:42,980 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:33:42,981 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:33:42,981 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a7948fca2832:0, corePoolSize=5, 
maxPoolSize=5 2024-11-16T11:33:42,981 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:33:42,981 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a7948fca2832:0, corePoolSize=10, maxPoolSize=10 2024-11-16T11:33:42,981 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:33:42,981 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:33:42,981 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:33:42,986 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731756852986 2024-11-16T11:33:42,988 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:33:42,988 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T11:33:42,988 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T11:33:42,989 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T11:33:42,992 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T11:33:42,992 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T11:33:42,993 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T11:33:42,993 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T11:33:42,994 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:42,994 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T11:33:42,996 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:43,000 DEBUG [RS:0;a7948fca2832:43973 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a7948fca2832:43973 2024-11-16T11:33:43,000 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T11:33:43,001 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T11:33:43,002 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T11:33:43,003 INFO [RS:0;a7948fca2832:43973 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T11:33:43,004 INFO [RS:0;a7948fca2832:43973 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T11:33:43,004 DEBUG [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T11:33:43,007 INFO [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7948fca2832,42855,1731756820796 with port=43973, startcode=1731756821555 2024-11-16T11:33:43,008 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T11:33:43,009 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T11:33:43,011 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731756823010,5,FailOnTimeoutGroup] 2024-11-16T11:33:43,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:33:43,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:33:43,012 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731756823011,5,FailOnTimeoutGroup] 2024-11-16T11:33:43,012 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:43,012 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T11:33:43,014 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:43,014 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
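The HMaster(1741) entry above reports that reopening regions with very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a positive threshold. A small sketch, with 256 as a purely illustrative threshold rather than a value suggested by this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableStoreFileRefCountRecovery {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // The HMaster entry above reports this feature disabled until the threshold is > 0.
        // 256 is an illustrative threshold, not a recommendation from the log.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
        System.out.println("storeFileRefCount threshold = "
                + conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
    }
}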
2024-11-16T11:33:43,014 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T11:33:43,015 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062 2024-11-16T11:33:43,019 DEBUG [RS:0;a7948fca2832:43973 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T11:33:43,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741832_1008 (size=32) 2024-11-16T11:33:43,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741832_1008 (size=32) 2024-11-16T11:33:43,033 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:33:43,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T11:33:43,039 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming 
window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T11:33:43,039 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:43,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:33:43,041 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T11:33:43,043 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T11:33:43,043 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:43,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:33:43,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T11:33:43,048 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T11:33:43,048 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:43,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:33:43,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T11:33:43,054 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T11:33:43,054 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:43,055 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:33:43,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T11:33:43,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740 2024-11-16T11:33:43,059 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740 2024-11-16T11:33:43,062 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T11:33:43,063 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T11:33:43,064 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
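The FlushLargeStoresPolicy entries above (32.0 M for master:store, 16.0 M for hbase:meta) show the fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from a table descriptor: the region memstore flush size divided by the number of column families. A hedged sketch of setting the key explicitly on a hypothetical 'demo' table (the table name, the 'info' family, and the 16 MB bound are all illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerColumnFamilyFlushBound {
    public static void main(String[] args) {
        // 'demo' and the 16 MB bound are illustrative; the log only shows the fallback
        // of memstore-flush-size / number-of-families when this key is absent.
        TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                        String.valueOf(16L * 1024 * 1024))
                .build();
        System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
    }
}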
2024-11-16T11:33:43,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T11:33:43,079 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:33:43,080 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=819277, jitterRate=0.04176515340805054}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T11:33:43,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731756823034Initializing all the Stores at 1731756823035 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756823036 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756823036Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756823036Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756823036Cleaning up temporary data from old regions at 1731756823063 (+27 ms)Region opened successfully at 1731756823083 (+20 ms) 2024-11-16T11:33:43,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T11:33:43,084 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T11:33:43,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T11:33:43,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T11:33:43,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T11:33:43,085 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T11:33:43,086 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731756823084Disabling compacts and flushes for region at 1731756823084Disabling writes for close at 1731756823084Writing 
region close event to WAL at 1731756823085 (+1 ms)Closed at 1731756823085 2024-11-16T11:33:43,089 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:33:43,089 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T11:33:43,096 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T11:33:43,097 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34537, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T11:33:43,105 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42855 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7948fca2832,43973,1731756821555 2024-11-16T11:33:43,105 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T11:33:43,108 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T11:33:43,108 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42855 {}] master.ServerManager(517): Registering regionserver=a7948fca2832,43973,1731756821555 2024-11-16T11:33:43,122 DEBUG [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062 2024-11-16T11:33:43,123 DEBUG [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39015 2024-11-16T11:33:43,123 DEBUG [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T11:33:43,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:33:43,175 DEBUG [RS:0;a7948fca2832:43973 {}] zookeeper.ZKUtil(111): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7948fca2832,43973,1731756821555 2024-11-16T11:33:43,175 WARN [RS:0;a7948fca2832:43973 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T11:33:43,176 INFO [RS:0;a7948fca2832:43973 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:33:43,176 DEBUG [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555 2024-11-16T11:33:43,179 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7948fca2832,43973,1731756821555] 2024-11-16T11:33:43,206 INFO [RS:0;a7948fca2832:43973 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T11:33:43,220 INFO [RS:0;a7948fca2832:43973 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T11:33:43,224 INFO [RS:0;a7948fca2832:43973 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T11:33:43,224 INFO [RS:0;a7948fca2832:43973 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:43,225 INFO [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T11:33:43,231 INFO [RS:0;a7948fca2832:43973 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T11:33:43,232 INFO [RS:0;a7948fca2832:43973 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:43,233 DEBUG [RS:0;a7948fca2832:43973 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:33:43,233 DEBUG [RS:0;a7948fca2832:43973 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:33:43,233 DEBUG [RS:0;a7948fca2832:43973 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:33:43,233 DEBUG [RS:0;a7948fca2832:43973 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:33:43,233 DEBUG [RS:0;a7948fca2832:43973 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:33:43,233 DEBUG [RS:0;a7948fca2832:43973 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:33:43,234 DEBUG [RS:0;a7948fca2832:43973 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:33:43,234 DEBUG [RS:0;a7948fca2832:43973 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:33:43,234 DEBUG [RS:0;a7948fca2832:43973 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7948fca2832:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T11:33:43,234 DEBUG [RS:0;a7948fca2832:43973 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:33:43,234 DEBUG [RS:0;a7948fca2832:43973 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:33:43,234 DEBUG [RS:0;a7948fca2832:43973 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:33:43,234 DEBUG [RS:0;a7948fca2832:43973 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:33:43,235 DEBUG [RS:0;a7948fca2832:43973 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:33:43,235 INFO [RS:0;a7948fca2832:43973 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:43,236 INFO [RS:0;a7948fca2832:43973 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:43,236 INFO [RS:0;a7948fca2832:43973 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:43,236 INFO [RS:0;a7948fca2832:43973 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:43,236 INFO [RS:0;a7948fca2832:43973 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:43,236 INFO [RS:0;a7948fca2832:43973 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,43973,1731756821555-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T11:33:43,252 INFO [RS:0;a7948fca2832:43973 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T11:33:43,254 INFO [RS:0;a7948fca2832:43973 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,43973,1731756821555-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:43,254 INFO [RS:0;a7948fca2832:43973 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:43,255 INFO [RS:0;a7948fca2832:43973 {}] regionserver.Replication(171): a7948fca2832,43973,1731756821555 started 2024-11-16T11:33:43,259 WARN [a7948fca2832:42855 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T11:33:43,271 INFO [RS:0;a7948fca2832:43973 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
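The MemStoreFlusher and PressureAwareCompactionThroughputController entries above report the derived limits for this JVM (globalMemStoreLimit=880 M with an 836 M low-water mark, compaction throughput bounded between 50 and 100 MB/second). The sketch below shows the configuration keys that usually drive those numbers; the property names are recalled from the HBase defaults rather than taken from this log, so verify them against the version in use:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerThroughputTuning {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the heap usable by all memstores; 0.4 is the usual default,
        // consistent with the 880 M / 836 M limits reported above for this JVM.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Upper/lower bounds used by PressureAwareCompactionThroughputController,
        // matching the 100 MB/s and 50 MB/s figures in the log (values in bytes/sec).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    }
}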
2024-11-16T11:33:43,271 INFO [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(1482): Serving as a7948fca2832,43973,1731756821555, RpcServer on a7948fca2832/172.17.0.2:43973, sessionid=0x101436c01d80001 2024-11-16T11:33:43,272 DEBUG [RS:0;a7948fca2832:43973 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T11:33:43,272 DEBUG [RS:0;a7948fca2832:43973 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7948fca2832,43973,1731756821555 2024-11-16T11:33:43,272 DEBUG [RS:0;a7948fca2832:43973 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,43973,1731756821555' 2024-11-16T11:33:43,273 DEBUG [RS:0;a7948fca2832:43973 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T11:33:43,274 DEBUG [RS:0;a7948fca2832:43973 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T11:33:43,274 DEBUG [RS:0;a7948fca2832:43973 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T11:33:43,274 DEBUG [RS:0;a7948fca2832:43973 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T11:33:43,275 DEBUG [RS:0;a7948fca2832:43973 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a7948fca2832,43973,1731756821555 2024-11-16T11:33:43,275 DEBUG [RS:0;a7948fca2832:43973 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,43973,1731756821555' 2024-11-16T11:33:43,275 DEBUG [RS:0;a7948fca2832:43973 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T11:33:43,275 DEBUG [RS:0;a7948fca2832:43973 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T11:33:43,276 DEBUG [RS:0;a7948fca2832:43973 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T11:33:43,276 INFO [RS:0;a7948fca2832:43973 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T11:33:43,276 INFO [RS:0;a7948fca2832:43973 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
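Both quota managers above log "Quota support disabled". Assuming the standard switch (hbase.quota.enabled, default false, which is not named in this log), enabling RPC and space quotas would look roughly like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableQuotaSupport {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Both quota managers in the log report "Quota support disabled";
        // RPC and space quotas hinge on this single switch (assumed default: false).
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println("quotas enabled = " + conf.getBoolean("hbase.quota.enabled", false));
    }
}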
2024-11-16T11:33:43,383 INFO [RS:0;a7948fca2832:43973 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C43973%2C1731756821555, suffix=, logDir=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555, archiveDir=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/oldWALs, maxLogs=32 2024-11-16T11:33:43,386 INFO [RS:0;a7948fca2832:43973 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C43973%2C1731756821555.1731756823386 2024-11-16T11:33:43,395 INFO [RS:0;a7948fca2832:43973 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756823386 2024-11-16T11:33:43,399 DEBUG [RS:0;a7948fca2832:43973 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45557:45557),(127.0.0.1/127.0.0.1:43143:43143)] 2024-11-16T11:33:43,511 DEBUG [a7948fca2832:42855 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T11:33:43,521 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a7948fca2832,43973,1731756821555 2024-11-16T11:33:43,527 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7948fca2832,43973,1731756821555, state=OPENING 2024-11-16T11:33:43,567 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T11:33:43,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:33:43,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:33:43,642 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:33:43,642 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:33:43,646 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T11:33:43,650 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a7948fca2832,43973,1731756821555}] 2024-11-16T11:33:43,833 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T11:33:43,837 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55789, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T11:33:43,848 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T11:33:43,849 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:33:43,852 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C43973%2C1731756821555.meta, suffix=.meta, logDir=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555, archiveDir=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/oldWALs, maxLogs=32 2024-11-16T11:33:43,854 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C43973%2C1731756821555.meta.1731756823854.meta 2024-11-16T11:33:43,862 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.meta.1731756823854.meta 2024-11-16T11:33:43,864 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43143:43143),(127.0.0.1/127.0.0.1:45557:45557)] 2024-11-16T11:33:43,866 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:33:43,868 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T11:33:43,870 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T11:33:43,874 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
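Editorial note: the "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines above come from FSHLog setup for the region server WAL and the meta WAL. The sketch below shows how such values are typically derived from configuration; the property names (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs) are the commonly documented keys and are stated here as assumptions, not read from this test's config files.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Assumed property names; fallbacks mirror the values printed in the log above.
    long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize",
        256L * 1024 * 1024);                                              // 256 MB
    float rollMultiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);

    // The rollsize reported in the log (128 MB) is blocksize * multiplier.
    long rollSize = (long) (blockSize * rollMultiplier);
    System.out.printf("blocksize=%d, rollsize=%d, maxLogs=%d%n",
        blockSize, rollSize, maxLogs);
  }
}
```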
2024-11-16T11:33:43,878 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T11:33:43,879 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:33:43,879 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T11:33:43,879 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T11:33:43,882 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T11:33:43,884 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T11:33:43,884 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:43,885 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:33:43,885 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T11:33:43,886 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T11:33:43,886 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:43,887 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:33:43,887 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T11:33:43,888 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T11:33:43,889 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:43,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:33:43,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T11:33:43,891 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T11:33:43,891 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:43,891 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
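Editorial note: each store opener above prints the effective CompactionConfiguration (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000) for the meta column families. As an illustration, the sketch below reads the configuration keys that usually back those numbers; the hbase.hstore.compaction.* and hbase.hregion.majorcompaction names are my assumption of the standard properties, not extracted from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Assumed standard compaction keys; defaults chosen to mirror the logged values.
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
    int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
    double ratio = conf.getDouble("hbase.hstore.compaction.ratio", 1.2);
    double offPeakRatio = conf.getDouble("hbase.hstore.compaction.ratio.offpeak", 5.0);
    long majorPeriodMs = conf.getLong("hbase.hregion.majorcompaction", 604800000L); // 7 days

    System.out.printf("minFilesToCompact=%d, maxFilesToCompact=%d, ratio=%.1f, "
        + "offPeakRatio=%.1f, majorPeriodMs=%d%n",
        minFiles, maxFiles, ratio, offPeakRatio, majorPeriodMs);
  }
}
```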
2024-11-16T11:33:43,892 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T11:33:43,893 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740 2024-11-16T11:33:43,895 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740 2024-11-16T11:33:43,898 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T11:33:43,898 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T11:33:43,899 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T11:33:43,901 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T11:33:43,903 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=745069, jitterRate=-0.05259668827056885}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T11:33:43,903 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T11:33:43,904 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731756823880Writing region info on filesystem at 1731756823880Initializing all the Stores at 1731756823881 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756823882 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756823882Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756823882Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756823882Cleaning up temporary data from old regions at 1731756823898 (+16 ms)Running coprocessor post-open hooks at 1731756823903 (+5 ms)Region opened successfully at 1731756823904 (+1 ms) 2024-11-16T11:33:43,910 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731756823825 2024-11-16T11:33:43,921 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T11:33:43,921 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T11:33:43,923 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a7948fca2832,43973,1731756821555 2024-11-16T11:33:43,925 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7948fca2832,43973,1731756821555, state=OPEN 2024-11-16T11:33:44,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T11:33:44,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T11:33:44,002 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:33:44,002 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:33:44,002 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a7948fca2832,43973,1731756821555 2024-11-16T11:33:44,007 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T11:33:44,008 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a7948fca2832,43973,1731756821555 in 353 msec 2024-11-16T11:33:44,014 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T11:33:44,014 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 913 msec 2024-11-16T11:33:44,016 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:33:44,016 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T11:33:44,033 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T11:33:44,034 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7948fca2832,43973,1731756821555, seqNum=-1] 2024-11-16T11:33:44,052 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T11:33:44,055 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45303, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T11:33:44,076 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1750 sec 2024-11-16T11:33:44,076 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731756824076, completionTime=-1 2024-11-16T11:33:44,079 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T11:33:44,079 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T11:33:44,107 INFO [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T11:33:44,108 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731756884107 2024-11-16T11:33:44,108 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731756944108 2024-11-16T11:33:44,108 INFO [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 28 msec 2024-11-16T11:33:44,110 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,42855,1731756820796-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:44,111 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,42855,1731756820796-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:44,111 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,42855,1731756820796-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:44,112 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a7948fca2832:42855, period=300000, unit=MILLISECONDS is enabled. 
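Editorial note: InitMetaProcedure above creates the 'default' and 'hbase' namespaces before the master finishes initialization. A minimal client-side check of the same result, assuming a reachable cluster with standard client configuration, might look like the following sketch:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListNamespacesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After InitMetaProcedure this should print "default" and "hbase".
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}
```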
2024-11-16T11:33:44,113 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:44,113 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T11:33:44,119 DEBUG [master/a7948fca2832:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T11:33:44,140 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.423sec 2024-11-16T11:33:44,141 INFO [master/a7948fca2832:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T11:33:44,142 INFO [master/a7948fca2832:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T11:33:44,143 INFO [master/a7948fca2832:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T11:33:44,143 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T11:33:44,143 INFO [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T11:33:44,144 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,42855,1731756820796-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T11:33:44,145 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,42855,1731756820796-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T11:33:44,153 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T11:33:44,153 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T11:33:44,154 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,42855,1731756820796-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T11:33:44,158 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2570660c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:33:44,161 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-16T11:33:44,162 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-16T11:33:44,165 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a7948fca2832,42855,-1 for getting cluster id 2024-11-16T11:33:44,169 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T11:33:44,176 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ad423bf2-a18b-426b-af99-a3bee82e0ab9' 2024-11-16T11:33:44,180 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T11:33:44,180 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ad423bf2-a18b-426b-af99-a3bee82e0ab9" 2024-11-16T11:33:44,182 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2461ffc6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:33:44,182 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a7948fca2832,42855,-1] 2024-11-16T11:33:44,184 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T11:33:44,186 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:33:44,188 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55992, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T11:33:44,191 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@125b7638, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:33:44,191 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T11:33:44,198 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7948fca2832,43973,1731756821555, seqNum=-1] 2024-11-16T11:33:44,199 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T11:33:44,202 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38862, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T11:33:44,223 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=a7948fca2832,42855,1731756820796 2024-11-16T11:33:44,224 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:33:44,246 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T11:33:44,249 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T11:33:44,254 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is a7948fca2832,42855,1731756820796 2024-11-16T11:33:44,256 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@14182cac 2024-11-16T11:33:44,257 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T11:33:44,260 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56006, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T11:33:44,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42855 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T11:33:44,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42855 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
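Editorial note: the two TableDescriptorChecker warnings fire because the test deliberately shrinks "hbase.hregion.max.filesize" (786432) and "hbase.hregion.memstore.flush.size" (8192) to force frequent rolls and flushes. The sketch below sets the same properties on a client/test Configuration and issues the balanceSwitch=false call logged just before; it assumes a standard Admin connection and is not the test's own code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SmallRegionSettingsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The values TableDescriptorChecker warns about above: intentionally tiny.
    conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB, forces over-splitting warning
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB, forces very frequent flushes

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Matches "set balanceSwitch=false" in the master log: disable the balancer
      // so regions stay put while the test rolls WALs.
      boolean previous = admin.balancerSwitch(false, true);
      System.out.println("balancer was previously " + (previous ? "on" : "off"));
    }
  }
}
```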
2024-11-16T11:33:44,267 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42855 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T11:33:44,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42855 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-16T11:33:44,283 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T11:33:44,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42855 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-16T11:33:44,287 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:44,290 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T11:33:44,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T11:33:44,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741835_1011 (size=389) 2024-11-16T11:33:44,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741835_1011 (size=389) 2024-11-16T11:33:44,336 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3e48931017169beba38f17f93d99f245, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062 2024-11-16T11:33:44,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741836_1012 (size=72) 2024-11-16T11:33:44,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741836_1012 (size=72) 2024-11-16T11:33:44,347 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:33:44,347 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 3e48931017169beba38f17f93d99f245, disabling compactions & flushes 2024-11-16T11:33:44,347 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. 2024-11-16T11:33:44,347 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. 2024-11-16T11:33:44,347 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. after waiting 0 ms 2024-11-16T11:33:44,347 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. 2024-11-16T11:33:44,347 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. 2024-11-16T11:33:44,347 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3e48931017169beba38f17f93d99f245: Waiting for close lock at 1731756824347Disabling compacts and flushes for region at 1731756824347Disabling writes for close at 1731756824347Writing region close event to WAL at 1731756824347Closed at 1731756824347 2024-11-16T11:33:44,350 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T11:33:44,355 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731756824350"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731756824350"}]},"ts":"1731756824350"} 2024-11-16T11:33:44,360 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-16T11:33:44,362 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T11:33:44,365 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731756824362"}]},"ts":"1731756824362"} 2024-11-16T11:33:44,369 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-16T11:33:44,371 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3e48931017169beba38f17f93d99f245, ASSIGN}] 2024-11-16T11:33:44,373 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3e48931017169beba38f17f93d99f245, ASSIGN 2024-11-16T11:33:44,375 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3e48931017169beba38f17f93d99f245, ASSIGN; state=OFFLINE, location=a7948fca2832,43973,1731756821555; forceNewPlan=false, retain=false 2024-11-16T11:33:44,529 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3e48931017169beba38f17f93d99f245, regionState=OPENING, regionLocation=a7948fca2832,43973,1731756821555 2024-11-16T11:33:44,536 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3e48931017169beba38f17f93d99f245, ASSIGN because future has completed 2024-11-16T11:33:44,538 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3e48931017169beba38f17f93d99f245, server=a7948fca2832,43973,1731756821555}] 2024-11-16T11:33:44,699 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. 
2024-11-16T11:33:44,700 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3e48931017169beba38f17f93d99f245, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245.', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:33:44,700 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 3e48931017169beba38f17f93d99f245 2024-11-16T11:33:44,700 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:33:44,700 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3e48931017169beba38f17f93d99f245 2024-11-16T11:33:44,700 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3e48931017169beba38f17f93d99f245 2024-11-16T11:33:44,703 INFO [StoreOpener-3e48931017169beba38f17f93d99f245-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3e48931017169beba38f17f93d99f245 2024-11-16T11:33:44,705 INFO [StoreOpener-3e48931017169beba38f17f93d99f245-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3e48931017169beba38f17f93d99f245 columnFamilyName info 2024-11-16T11:33:44,706 DEBUG [StoreOpener-3e48931017169beba38f17f93d99f245-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:33:44,707 INFO [StoreOpener-3e48931017169beba38f17f93d99f245-1 {}] regionserver.HStore(327): Store=3e48931017169beba38f17f93d99f245/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:33:44,707 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3e48931017169beba38f17f93d99f245 2024-11-16T11:33:44,708 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245 2024-11-16T11:33:44,709 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245 2024-11-16T11:33:44,710 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3e48931017169beba38f17f93d99f245 2024-11-16T11:33:44,710 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3e48931017169beba38f17f93d99f245 2024-11-16T11:33:44,713 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3e48931017169beba38f17f93d99f245 2024-11-16T11:33:44,716 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:33:44,717 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3e48931017169beba38f17f93d99f245; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700438, jitterRate=-0.10934777557849884}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T11:33:44,717 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3e48931017169beba38f17f93d99f245 2024-11-16T11:33:44,718 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3e48931017169beba38f17f93d99f245: Running coprocessor pre-open hook at 1731756824701Writing region info on filesystem at 1731756824701Initializing all the Stores at 1731756824702 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756824702Cleaning up temporary data from old regions at 1731756824710 (+8 ms)Running coprocessor post-open hooks at 1731756824717 (+7 ms)Region opened successfully at 1731756824718 (+1 ms) 2024-11-16T11:33:44,720 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245., pid=6, masterSystemTime=1731756824693 2024-11-16T11:33:44,724 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. 2024-11-16T11:33:44,724 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. 2024-11-16T11:33:44,726 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3e48931017169beba38f17f93d99f245, regionState=OPEN, openSeqNum=2, regionLocation=a7948fca2832,43973,1731756821555 2024-11-16T11:33:44,731 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3e48931017169beba38f17f93d99f245, server=a7948fca2832,43973,1731756821555 because future has completed 2024-11-16T11:33:44,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T11:33:44,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3e48931017169beba38f17f93d99f245, server=a7948fca2832,43973,1731756821555 in 196 msec 2024-11-16T11:33:44,744 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T11:33:44,744 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3e48931017169beba38f17f93d99f245, ASSIGN in 367 msec 2024-11-16T11:33:44,745 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T11:33:44,746 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731756824746"}]},"ts":"1731756824746"} 2024-11-16T11:33:44,750 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-16T11:33:44,752 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T11:33:44,755 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 478 msec 2024-11-16T11:33:49,394 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-16T11:33:49,446 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T11:33:49,448 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-16T11:33:51,138 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T11:33:51,138 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T11:33:51,142 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-16T11:33:51,142 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T11:33:51,145 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T11:33:51,145 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T11:33:51,146 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T11:33:51,146 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-16T11:33:54,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T11:33:54,311 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-16T11:33:54,320 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-16T11:33:54,326 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-16T11:33:54,327 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. 
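Editorial note: the create request in the log ('TestLogRolling-testSlowSyncLogRolling' with a single 'info' family, VERSIONS=1, BLOOMFILTER=ROW) runs as master procedure pid=4 and completes above ("Operation: CREATE ... completed"). An equivalent client-side create, sketched with the public TableDescriptorBuilder/Admin API rather than the test's actual code, would be:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");

    TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)                 // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
            .build())
        .build();

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the CreateTableProcedure (pid=4 in the log) is done.
      admin.createTable(desc);
    }
  }
}
```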
2024-11-16T11:33:54,327 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C43973%2C1731756821555.1731756834327 2024-11-16T11:33:54,335 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:33:54,336 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:33:54,336 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:33:54,336 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:33:54,336 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:33:54,337 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756823386 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756834327 2024-11-16T11:33:54,338 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43143:43143),(127.0.0.1/127.0.0.1:45557:45557)] 2024-11-16T11:33:54,338 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756823386 is not closed yet, will try archiving it next time 2024-11-16T11:33:54,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741833_1009 (size=451) 2024-11-16T11:33:54,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741833_1009 (size=451) 2024-11-16T11:33:54,342 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756823386 to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/oldWALs/a7948fca2832%2C43973%2C1731756821555.1731756823386 2024-11-16T11:33:54,347 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245., hostname=a7948fca2832,43973,1731756821555, seqNum=2] 2024-11-16T11:34:06,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43973 {}] regionserver.HRegion(8855): Flush requested on 3e48931017169beba38f17f93d99f245 2024-11-16T11:34:06,402 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3e48931017169beba38f17f93d99f245 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T11:34:06,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/c106d36c1bc54b8ba7a0218eb324d357 is 1080, key is row0001/info:/1731756834350/Put/seqid=0 2024-11-16T11:34:06,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741838_1014 (size=12509) 2024-11-16T11:34:06,501 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741838_1014 (size=12509) 2024-11-16T11:34:06,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/c106d36c1bc54b8ba7a0218eb324d357 2024-11-16T11:34:06,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/c106d36c1bc54b8ba7a0218eb324d357 as hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/c106d36c1bc54b8ba7a0218eb324d357 2024-11-16T11:34:06,577 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/c106d36c1bc54b8ba7a0218eb324d357, entries=7, sequenceid=11, filesize=12.2 K 2024-11-16T11:34:06,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3e48931017169beba38f17f93d99f245 in 182ms, sequenceid=11, compaction requested=false 2024-11-16T11:34:06,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3e48931017169beba38f17f93d99f245: 2024-11-16T11:34:09,963 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
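
The flush above follows a write-then-publish pattern: DefaultStoreFlusher writes the new store file under the region's .tmp directory, and HRegionFileSystem then commits it into the info family with a rename ("Committing .../.tmp/info/... as .../info/..."), so readers only ever see a complete HFile. A local-filesystem analogue of that commit step, purely as a sketch (the real files live on HDFS and carry bloom filters, sequence ids and block metadata):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Local-filesystem analogue of the flush commit logged above (HBase does this on HDFS):
// write the new store file under .tmp, then publish it with a single rename.
final class FlushCommitSketch {
  static Path commitFlush(Path regionDir, String fileName, byte[] hfileBytes) throws IOException {
    Path tmpDir = regionDir.resolve(".tmp").resolve("info");
    Files.createDirectories(tmpDir);
    Path tmpFile = tmpDir.resolve(fileName);
    Files.write(tmpFile, hfileBytes);                       // "Flushed memstore data ... to=.../.tmp/info/<file>"
    Path published = regionDir.resolve("info").resolve(fileName);
    Files.createDirectories(published.getParent());
    // "Committing .../.tmp/info/<file> as .../info/<file>": the file becomes visible atomically.
    return Files.move(tmpFile, published, StandardCopyOption.ATOMIC_MOVE);
  }
}

The follow-up "Added ..., entries=7, sequenceid=11" and "Finished flush ... compaction requested=false" lines then correspond to registering the published file with the store and deciding whether enough files have accumulated to compact.
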
2024-11-16T11:34:14,411 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C43973%2C1731756821555.1731756854411 2024-11-16T11:34:14,619 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK]] 2024-11-16T11:34:14,620 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:14,620 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:14,620 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:14,620 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:14,620 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:14,621 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756834327 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756854411 2024-11-16T11:34:14,622 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45557:45557),(127.0.0.1/127.0.0.1:43143:43143)] 2024-11-16T11:34:14,622 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756834327 is not closed yet, will try archiving it next time 2024-11-16T11:34:14,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741837_1013 (size=12399) 2024-11-16T11:34:14,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741837_1013 (size=12399) 2024-11-16T11:34:14,825 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:17,029 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:19,234 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:21,438 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:21,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43973 {}] regionserver.HRegion(8855): Flush requested on 3e48931017169beba38f17f93d99f245 2024-11-16T11:34:21,439 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3e48931017169beba38f17f93d99f245 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T11:34:21,640 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:21,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/b853e976c1c9474b90a9743f39a8a303 is 1080, key is row0008/info:/1731756848400/Put/seqid=0 2024-11-16T11:34:21,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741840_1016 (size=12509) 2024-11-16T11:34:21,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741840_1016 (size=12509) 2024-11-16T11:34:21,656 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/b853e976c1c9474b90a9743f39a8a303 2024-11-16T11:34:21,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/b853e976c1c9474b90a9743f39a8a303 as hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/b853e976c1c9474b90a9743f39a8a303 2024-11-16T11:34:21,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/b853e976c1c9474b90a9743f39a8a303, entries=7, sequenceid=21, filesize=12.2 K 2024-11-16T11:34:21,881 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:21,881 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3e48931017169beba38f17f93d99f245 in 
443ms, sequenceid=21, compaction requested=false 2024-11-16T11:34:21,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3e48931017169beba38f17f93d99f245: 2024-11-16T11:34:21,881 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-16T11:34:21,881 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:34:21,882 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/c106d36c1bc54b8ba7a0218eb324d357 because midkey is the same as first or last row 2024-11-16T11:34:23,643 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:24,220 INFO [master/a7948fca2832:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T11:34:24,220 INFO [master/a7948fca2832:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T11:34:25,846 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:25,848 WARN [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:25,849 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C43973%2C1731756821555:(num 1731756854411) roll requested 2024-11-16T11:34:25,850 INFO [regionserver/a7948fca2832:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C43973%2C1731756821555.1731756865849 2024-11-16T11:34:26,058 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:26,059 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:26,059 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:26,059 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:26,059 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:26,059 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
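
This roll was requested by the count-based trigger: a run of syncs each cost roughly 200 ms, and once the count of slow syncs passed the threshold ("Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5") the WAL asked the roller for a fresh file. Further down the same log a second, time-based trigger fires when a single sync exceeds 5000 ms. The small policy class below is illustrative, not HBase's AbstractFSWAL; the field names and the per-sync "slow" cutoff are assumptions, and only the count of 5 and the 5000 ms ceiling are taken from the log:

// Illustrative only: mirrors the two roll triggers visible in this log --
// too many "slow" syncs (count=8 > threshold=5 above) and, further down,
// a single sync exceeding a hard ceiling (time=5012 ms > 5000 ms).
final class SlowSyncRollPolicy {
  private final long slowSyncMs;      // a sync slower than this counts as "slow" (assumed parameter)
  private final int countThreshold;   // e.g. 5, per the WARN above
  private final long hardCeilingMs;   // e.g. 5000 ms, per the later WARN
  private int slowCount;

  SlowSyncRollPolicy(long slowSyncMs, int countThreshold, long hardCeilingMs) {
    this.slowSyncMs = slowSyncMs;
    this.countThreshold = countThreshold;
    this.hardCeilingMs = hardCeilingMs;
  }

  /** Returns true when the WAL should be rolled onto a fresh file. */
  synchronized boolean onSyncCompleted(long syncCostMs) {
    if (syncCostMs >= hardCeilingMs) {
      return true;                                   // one very slow sync is enough
    }
    if (syncCostMs >= slowSyncMs && ++slowCount > countThreshold) {
      slowCount = 0;                                 // reset after requesting a roll
      return true;
    }
    return false;
  }
}

Each roll creates a new writer and a fresh datanode pipeline (the "Create new FSHLog writer with pipeline" lines), which is the behaviour TestLogRolling-testSlowSyncLogRolling is exercising here.
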
2024-11-16T11:34:26,059 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756854411 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756865849 2024-11-16T11:34:26,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741839_1015 (size=7739) 2024-11-16T11:34:26,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741839_1015 (size=7739) 2024-11-16T11:34:26,063 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45557:45557),(127.0.0.1/127.0.0.1:43143:43143)] 2024-11-16T11:34:26,063 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756854411 is not closed yet, will try archiving it next time 2024-11-16T11:34:26,064 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756834327 to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/oldWALs/a7948fca2832%2C43973%2C1731756821555.1731756834327 2024-11-16T11:34:28,050 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:29,700 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3e48931017169beba38f17f93d99f245, had cached 0 bytes from a total of 25018 2024-11-16T11:34:30,255 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:32,459 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:34,667 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], 
DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:36,671 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T11:34:36,671 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C43973%2C1731756821555.1731756876671 2024-11-16T11:34:39,963 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T11:34:41,689 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5012 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:41,691 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5012 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:41,692 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C43973%2C1731756821555:(num 1731756876671) roll requested 2024-11-16T11:34:41,692 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:41,692 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:41,692 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:41,692 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:41,693 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:41,693 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756865849 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756876671 2024-11-16T11:34:41,694 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43143:43143),(127.0.0.1/127.0.0.1:45557:45557)] 2024-11-16T11:34:41,694 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756865849 is not closed yet, will try archiving it next time 2024-11-16T11:34:41,695 INFO [regionserver/a7948fca2832:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C43973%2C1731756821555.1731756881695 2024-11-16T11:34:41,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741841_1017 (size=4753) 2024-11-16T11:34:41,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741841_1017 (size=4753) 2024-11-16T11:34:46,700 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK]] 2024-11-16T11:34:46,700 WARN [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK]] 2024-11-16T11:34:46,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43973 {}] regionserver.HRegion(8855): Flush requested on 3e48931017169beba38f17f93d99f245 2024-11-16T11:34:46,701 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3e48931017169beba38f17f93d99f245 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T11:34:46,710 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK]] 2024-11-16T11:34:46,710 WARN [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK]] 2024-11-16T11:34:48,702 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T11:34:51,704 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK]] 2024-11-16T11:34:51,704 WARN [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK], DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK]] 2024-11-16T11:34:51,704 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:51,704 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:51,704 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:51,704 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:51,705 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:51,705 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756876671 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756881695 2024-11-16T11:34:51,706 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45557:45557),(127.0.0.1/127.0.0.1:43143:43143)] 2024-11-16T11:34:51,707 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756876671 is not closed yet, will try archiving it next time 2024-11-16T11:34:51,707 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C43973%2C1731756821555:(num 1731756881695) roll requested 2024-11-16T11:34:51,707 INFO [regionserver/a7948fca2832:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C43973%2C1731756821555.1731756891707 2024-11-16T11:34:51,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741842_1018 (size=1569) 2024-11-16T11:34:51,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741842_1018 (size=1569) 2024-11-16T11:34:51,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/7cd5a6408d4a4e3593b41e688166225e is 1080, key is row0015/info:/1731756863441/Put/seqid=0 2024-11-16T11:34:51,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741844_1020 (size=12509) 2024-11-16T11:34:51,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741844_1020 (size=12509) 2024-11-16T11:34:51,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/7cd5a6408d4a4e3593b41e688166225e 2024-11-16T11:34:51,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/7cd5a6408d4a4e3593b41e688166225e as hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/7cd5a6408d4a4e3593b41e688166225e 2024-11-16T11:34:51,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/7cd5a6408d4a4e3593b41e688166225e, entries=7, sequenceid=31, filesize=12.2 K 2024-11-16T11:34:56,718 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:56,718 WARN [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:56,757 INFO [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:56,757 WARN [FSHLog-0-hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062-prefix:a7948fca2832,43973,1731756821555 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39897,DS-782aee3b-21ea-4dfe-bb7d-0910b2c6192d,DISK], DatanodeInfoWithStorage[127.0.0.1:37779,DS-5e84583c-c927-4458-89b5-47b1cc0a5e2c,DISK]] 2024-11-16T11:34:56,757 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3e48931017169beba38f17f93d99f245 in 10057ms, sequenceid=31, compaction requested=true 2024-11-16T11:34:56,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3e48931017169beba38f17f93d99f245: 2024-11-16T11:34:56,757 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,757 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-16T11:34:56,757 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,757 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:34:56,758 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,758 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/c106d36c1bc54b8ba7a0218eb324d357 because midkey is the same as first or last row 2024-11-16T11:34:56,758 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,758 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,759 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756881695 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756891707 2024-11-16T11:34:56,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e48931017169beba38f17f93d99f245:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-16T11:34:56,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741843_1019 (size=438) 2024-11-16T11:34:56,763 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45557:45557),(127.0.0.1/127.0.0.1:43143:43143)] 2024-11-16T11:34:56,763 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756881695 is not closed yet, will try archiving it next time 2024-11-16T11:34:56,763 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756854411 to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/oldWALs/a7948fca2832%2C43973%2C1731756821555.1731756854411 2024-11-16T11:34:56,763 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C43973%2C1731756821555:(num 1731756896763) roll requested 2024-11-16T11:34:56,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:34:56,763 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C43973%2C1731756821555.1731756896763 2024-11-16T11:34:56,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741843_1019 (size=438) 2024-11-16T11:34:56,764 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T11:34:56,766 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756865849 to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/oldWALs/a7948fca2832%2C43973%2C1731756821555.1731756865849 2024-11-16T11:34:56,768 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756876671 to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/oldWALs/a7948fca2832%2C43973%2C1731756821555.1731756876671 2024-11-16T11:34:56,768 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T11:34:56,769 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756881695 to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/oldWALs/a7948fca2832%2C43973%2C1731756821555.1731756881695 2024-11-16T11:34:56,770 DEBUG 
[RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.HStore(1541): 3e48931017169beba38f17f93d99f245/info is initiating minor compaction (all files) 2024-11-16T11:34:56,771 INFO [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3e48931017169beba38f17f93d99f245/info in TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. 2024-11-16T11:34:56,771 INFO [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/c106d36c1bc54b8ba7a0218eb324d357, hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/b853e976c1c9474b90a9743f39a8a303, hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/7cd5a6408d4a4e3593b41e688166225e] into tmpdir=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp, totalSize=36.6 K 2024-11-16T11:34:56,773 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] compactions.Compactor(225): Compacting c106d36c1bc54b8ba7a0218eb324d357, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731756834350 2024-11-16T11:34:56,773 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,773 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,773 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,773 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,773 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,774 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756891707 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756896763 2024-11-16T11:34:56,774 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] compactions.Compactor(225): Compacting b853e976c1c9474b90a9743f39a8a303, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731756848400 2024-11-16T11:34:56,775 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45557:45557),(127.0.0.1/127.0.0.1:43143:43143)] 2024-11-16T11:34:56,775 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756891707 is not closed yet, will try archiving it next time 2024-11-16T11:34:56,775 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7cd5a6408d4a4e3593b41e688166225e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731756863441 2024-11-16T11:34:56,775 INFO [regionserver/a7948fca2832:0.logRoller {}] 
monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C43973%2C1731756821555.1731756896775 2024-11-16T11:34:56,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741845_1021 (size=93) 2024-11-16T11:34:56,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741845_1021 (size=93) 2024-11-16T11:34:56,778 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756891707 to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/oldWALs/a7948fca2832%2C43973%2C1731756821555.1731756891707 2024-11-16T11:34:56,784 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,784 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,788 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,789 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,792 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:34:56,792 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756896763 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/WALs/a7948fca2832,43973,1731756821555/a7948fca2832%2C43973%2C1731756821555.1731756896775 2024-11-16T11:34:56,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741846_1022 (size=1258) 2024-11-16T11:34:56,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741846_1022 (size=1258) 2024-11-16T11:34:56,801 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45557:45557),(127.0.0.1/127.0.0.1:43143:43143)] 2024-11-16T11:34:56,815 INFO [RS:0;a7948fca2832:43973-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e48931017169beba38f17f93d99f245#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:34:56,816 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/550a2bf011df42ea9855ed405ef06749 is 1080, key is row0001/info:/1731756834350/Put/seqid=0 2024-11-16T11:34:56,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741848_1024 (size=27710) 2024-11-16T11:34:56,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741848_1024 (size=27710) 2024-11-16T11:34:56,838 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/550a2bf011df42ea9855ed405ef06749 as hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/550a2bf011df42ea9855ed405ef06749 2024-11-16T11:34:56,866 INFO [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3e48931017169beba38f17f93d99f245/info of 3e48931017169beba38f17f93d99f245 into 550a2bf011df42ea9855ed405ef06749(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T11:34:56,866 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3e48931017169beba38f17f93d99f245: 2024-11-16T11:34:56,869 INFO [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245., storeName=3e48931017169beba38f17f93d99f245/info, priority=13, startTime=1731756896759; duration=0sec 2024-11-16T11:34:56,869 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T11:34:56,869 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:34:56,869 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/550a2bf011df42ea9855ed405ef06749 because midkey is the same as first or last row 2024-11-16T11:34:56,870 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T11:34:56,870 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:34:56,870 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/550a2bf011df42ea9855ed405ef06749 because midkey is the same as first or last row 2024-11-16T11:34:56,870 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T11:34:56,870 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:34:56,870 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/550a2bf011df42ea9855ed405ef06749 because midkey is the same as first or last row 2024-11-16T11:34:56,870 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:34:56,871 DEBUG [RS:0;a7948fca2832:43973-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e48931017169beba38f17f93d99f245:info 2024-11-16T11:35:08,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43973 {}] regionserver.HRegion(8855): Flush requested on 3e48931017169beba38f17f93d99f245 2024-11-16T11:35:08,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3e48931017169beba38f17f93d99f245 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T11:35:08,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/80bd49a5c494433c9b66046beab79089 is 1080, key is row0022/info:/1731756896776/Put/seqid=0 2024-11-16T11:35:08,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741849_1025 (size=12509) 2024-11-16T11:35:08,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741849_1025 (size=12509) 2024-11-16T11:35:08,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/80bd49a5c494433c9b66046beab79089 2024-11-16T11:35:08,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/80bd49a5c494433c9b66046beab79089 as hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/80bd49a5c494433c9b66046beab79089 2024-11-16T11:35:08,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/80bd49a5c494433c9b66046beab79089, entries=7, sequenceid=42, filesize=12.2 K 2024-11-16T11:35:08,841 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3e48931017169beba38f17f93d99f245 in 37ms, sequenceid=42, compaction requested=false 2024-11-16T11:35:08,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3e48931017169beba38f17f93d99f245: 2024-11-16T11:35:08,841 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-16T11:35:08,841 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:35:08,842 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/550a2bf011df42ea9855ed405ef06749 because midkey is the same as first or last row 2024-11-16T11:35:09,963 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T11:35:14,701 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3e48931017169beba38f17f93d99f245, had cached 0 bytes from a total of 40219 2024-11-16T11:35:16,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T11:35:16,823 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
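
After every flush, and again after the minor compaction that merged the three 12.2 K files into one 27.1 K file, the region server re-checks whether the region should split: the store size (up to ~39.3 K here) exceeds the 16.0 K check size, but the split is refused because the candidate split point, the biggest file's midkey, equals the region's first or last row ("cannot split ... because midkey is the same as first or last row"). A rough sketch of that two-step decision with hypothetical types:

import java.util.Arrays;
import java.util.Optional;

// Rough sketch of the split check logged above: the size test says "split",
// but the midkey veto says "cannot split" when the candidate split point
// equals the region's first or last row. Names here are illustrative.
final class SplitDecisionSketch {
  static Optional<byte[]> chooseSplitPoint(long storeSizeBytes, long sizeToCheckBytes,
                                           byte[] midkey, byte[] firstRow, byte[] lastRow) {
    if (storeSizeBytes <= sizeToCheckBytes) {
      return Optional.empty();                       // region not big enough yet
    }
    // "cannot split ... because midkey is the same as first or last row"
    if (Arrays.equals(midkey, firstRow) || Arrays.equals(midkey, lastRow)) {
      return Optional.empty();
    }
    return Optional.of(midkey);                      // split at the biggest file's midkey
  }
}

In this test the veto fires every time, so the region stays whole despite repeatedly passing the size check.
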
2024-11-16T11:35:16,823 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:35:16,832 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:35:16,833 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:35:16,833 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
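
The call stack above is not an error: AsyncConnectionImpl logs where close() was invoked from at DEBUG level, and here it is driven by the test's tearDown, with AbstractTestLogRolling.tearDown calling HBaseTestingUtil.shutdownMiniCluster, which closes the shared connection before stopping the cluster. A minimal JUnit 4 lifecycle skeleton along those lines, assuming the hbase-testing-util artifact is on the classpath:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

// Minimal sketch of the lifecycle visible in the stack trace above.
public class MiniClusterLifecycle {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster();      // boots HDFS, ZooKeeper, a master and a region server
  }

  @After
  public void tearDown() throws Exception {
    testUtil.shutdownMiniCluster();   // closes the shared async connection, then stops the cluster
  }
}
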
2024-11-16T11:35:16,833 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T11:35:16,833 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=437771206, stopped=false 2024-11-16T11:35:16,833 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a7948fca2832,42855,1731756820796 2024-11-16T11:35:16,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T11:35:16,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T11:35:16,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:16,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:16,909 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T11:35:16,910 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T11:35:16,910 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:35:16,910 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:35:16,910 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:35:16,910 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:35:16,911 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a7948fca2832,43973,1731756821555' ***** 2024-11-16T11:35:16,911 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T11:35:16,912 INFO [RS:0;a7948fca2832:43973 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T11:35:16,912 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T11:35:16,913 INFO [RS:0;a7948fca2832:43973 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T11:35:16,913 INFO [RS:0;a7948fca2832:43973 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T11:35:16,913 INFO [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(3091): Received CLOSE for 3e48931017169beba38f17f93d99f245 2024-11-16T11:35:16,914 INFO [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(959): stopping server a7948fca2832,43973,1731756821555 2024-11-16T11:35:16,914 INFO [RS:0;a7948fca2832:43973 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T11:35:16,914 INFO [RS:0;a7948fca2832:43973 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a7948fca2832:43973. 
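
The NodeDeleted events for /hbase/running are the cluster-wide stop signal: the shutdown deletes that znode, every watching process sees the deletion, re-arms its watch ("Set watcher on znode that does not yet exist"), and begins stopping, which is why the region server logs STOPPING immediately afterwards. The stripped-down watcher below uses the plain ZooKeeper client to illustrate the mechanism; only the /hbase/running path comes from the log, the rest is an assumption and not HBase's ZKWatcher:

import java.io.IOException;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustrative watcher: react to deletion of the /hbase/running znode seen above.
final class RunningZNodeWatcher implements Watcher {
  private final ZooKeeper zk;
  private final Runnable onClusterStop;

  RunningZNodeWatcher(String quorum, Runnable onClusterStop)
      throws IOException, KeeperException, InterruptedException {
    this.onClusterStop = onClusterStop;
    this.zk = new ZooKeeper(quorum, 30_000, this);
    zk.exists("/hbase/running", true);   // sets the watch even if the znode does not exist yet
  }

  @Override
  public void process(WatchedEvent event) {
    if (event.getType() == Watcher.Event.EventType.NodeDeleted
        && "/hbase/running".equals(event.getPath())) {
      onClusterStop.run();               // e.g. begin the region server's STOPPING sequence
      return;
    }
    try {
      zk.exists("/hbase/running", true); // ZooKeeper watches are one-shot, so re-arm
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    } catch (KeeperException e) {
      // best-effort re-arm; a real watcher would handle session problems here
    }
  }
}
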
2024-11-16T11:35:16,915 DEBUG [RS:0;a7948fca2832:43973 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:35:16,915 DEBUG [RS:0;a7948fca2832:43973 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:35:16,915 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3e48931017169beba38f17f93d99f245, disabling compactions & flushes 2024-11-16T11:35:16,915 INFO [RS:0;a7948fca2832:43973 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T11:35:16,915 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. 2024-11-16T11:35:16,915 INFO [RS:0;a7948fca2832:43973 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T11:35:16,915 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. 2024-11-16T11:35:16,915 INFO [RS:0;a7948fca2832:43973 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T11:35:16,915 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. after waiting 0 ms 2024-11-16T11:35:16,915 INFO [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T11:35:16,915 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. 
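The region server's own close stack above runs inside UserGroupInformation.doAs (via User$SecureHadoopUser.runAs), i.e. the server thread executes under a Hadoop user context. A minimal, self-contained sketch of that doAs pattern, purely for illustration (nothing here is taken from the HBase sources):

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
  public static void main(String[] args) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // Execute a task under ugi's context, the same mechanism the
    // MiniHBaseClusterRegionServer thread in the stack above relies on.
    String user = ugi.doAs((PrivilegedExceptionAction<String>) () ->
        UserGroupInformation.getCurrentUser().getUserName());
    System.out.println("ran as: " + user);
  }
}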
2024-11-16T11:35:16,916 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 3e48931017169beba38f17f93d99f245 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-16T11:35:16,916 INFO [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T11:35:16,916 DEBUG [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 3e48931017169beba38f17f93d99f245=TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245.} 2024-11-16T11:35:16,916 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T11:35:16,916 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T11:35:16,916 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T11:35:16,916 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T11:35:16,916 DEBUG [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3e48931017169beba38f17f93d99f245 2024-11-16T11:35:16,916 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T11:35:16,917 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-16T11:35:16,922 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/5956fef76c0e4d4886e3ad3e5ec99a32 is 1080, key is row0029/info:/1731756910807/Put/seqid=0 2024-11-16T11:35:16,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741850_1026 (size=8193) 2024-11-16T11:35:16,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741850_1026 (size=8193) 2024-11-16T11:35:16,930 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/5956fef76c0e4d4886e3ad3e5ec99a32 2024-11-16T11:35:16,940 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/.tmp/info/5956fef76c0e4d4886e3ad3e5ec99a32 as 
hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/5956fef76c0e4d4886e3ad3e5ec99a32 2024-11-16T11:35:16,941 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/.tmp/info/fb603a70f44b4da4a32b4d8e9a2b8289 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245./info:regioninfo/1731756824725/Put/seqid=0 2024-11-16T11:35:16,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741851_1027 (size=7016) 2024-11-16T11:35:16,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741851_1027 (size=7016) 2024-11-16T11:35:16,948 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/.tmp/info/fb603a70f44b4da4a32b4d8e9a2b8289 2024-11-16T11:35:16,949 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/5956fef76c0e4d4886e3ad3e5ec99a32, entries=3, sequenceid=48, filesize=8.0 K 2024-11-16T11:35:16,951 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 3e48931017169beba38f17f93d99f245 in 35ms, sequenceid=48, compaction requested=true 2024-11-16T11:35:16,951 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/c106d36c1bc54b8ba7a0218eb324d357, hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/b853e976c1c9474b90a9743f39a8a303, hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/7cd5a6408d4a4e3593b41e688166225e] to archive 2024-11-16T11:35:16,958 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-16T11:35:16,962 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/c106d36c1bc54b8ba7a0218eb324d357 to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/archive/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/c106d36c1bc54b8ba7a0218eb324d357 2024-11-16T11:35:16,964 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/b853e976c1c9474b90a9743f39a8a303 to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/archive/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/b853e976c1c9474b90a9743f39a8a303 2024-11-16T11:35:16,966 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/7cd5a6408d4a4e3593b41e688166225e to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/archive/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/info/7cd5a6408d4a4e3593b41e688166225e 2024-11-16T11:35:16,981 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/.tmp/ns/8b9d1c3607ef4e898dfff6c720127d52 is 43, key is default/ns:d/1731756824059/Put/seqid=0 2024-11-16T11:35:16,979 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=a7948fca2832:42855 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-16T11:35:16,986 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c106d36c1bc54b8ba7a0218eb324d357=12509, b853e976c1c9474b90a9743f39a8a303=12509, 7cd5a6408d4a4e3593b41e688166225e=12509] 2024-11-16T11:35:16,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741852_1028 (size=5153) 2024-11-16T11:35:16,990 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/.tmp/ns/8b9d1c3607ef4e898dfff6c720127d52 2024-11-16T11:35:16,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741852_1028 (size=5153) 2024-11-16T11:35:16,994 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/default/TestLogRolling-testSlowSyncLogRolling/3e48931017169beba38f17f93d99f245/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-16T11:35:16,998 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. 2024-11-16T11:35:16,998 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3e48931017169beba38f17f93d99f245: Waiting for close lock at 1731756916914Running coprocessor pre-close hooks at 1731756916915 (+1 ms)Disabling compacts and flushes for region at 1731756916915Disabling writes for close at 1731756916915Obtaining lock to block concurrent updates at 1731756916916 (+1 ms)Preparing flush snapshotting stores in 3e48931017169beba38f17f93d99f245 at 1731756916916Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731756916916Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. at 1731756916917 (+1 ms)Flushing 3e48931017169beba38f17f93d99f245/info: creating writer at 1731756916917Flushing 3e48931017169beba38f17f93d99f245/info: appending metadata at 1731756916921 (+4 ms)Flushing 3e48931017169beba38f17f93d99f245/info: closing flushed file at 1731756916921Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31d5e35: reopening flushed file at 1731756916939 (+18 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 3e48931017169beba38f17f93d99f245 in 35ms, sequenceid=48, compaction requested=true at 1731756916951 (+12 ms)Writing region close event to WAL at 1731756916987 (+36 ms)Running coprocessor post-close hooks at 1731756916996 (+9 ms)Closed at 1731756916997 (+1 ms) 2024-11-16T11:35:16,998 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731756824262.3e48931017169beba38f17f93d99f245. 
2024-11-16T11:35:17,017 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/.tmp/table/50f31176a7564dd691d580f250d95c90 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731756824746/Put/seqid=0 2024-11-16T11:35:17,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741853_1029 (size=5396) 2024-11-16T11:35:17,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741853_1029 (size=5396) 2024-11-16T11:35:17,023 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/.tmp/table/50f31176a7564dd691d580f250d95c90 2024-11-16T11:35:17,031 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/.tmp/info/fb603a70f44b4da4a32b4d8e9a2b8289 as hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/info/fb603a70f44b4da4a32b4d8e9a2b8289 2024-11-16T11:35:17,039 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/info/fb603a70f44b4da4a32b4d8e9a2b8289, entries=10, sequenceid=11, filesize=6.9 K 2024-11-16T11:35:17,041 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/.tmp/ns/8b9d1c3607ef4e898dfff6c720127d52 as hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/ns/8b9d1c3607ef4e898dfff6c720127d52 2024-11-16T11:35:17,048 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/ns/8b9d1c3607ef4e898dfff6c720127d52, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T11:35:17,050 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/.tmp/table/50f31176a7564dd691d580f250d95c90 as hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/table/50f31176a7564dd691d580f250d95c90 2024-11-16T11:35:17,060 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/table/50f31176a7564dd691d580f250d95c90, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T11:35:17,061 INFO 
[RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 144ms, sequenceid=11, compaction requested=false 2024-11-16T11:35:17,067 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T11:35:17,068 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T11:35:17,068 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T11:35:17,068 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731756916916Running coprocessor pre-close hooks at 1731756916916Disabling compacts and flushes for region at 1731756916916Disabling writes for close at 1731756916916Obtaining lock to block concurrent updates at 1731756916917 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731756916917Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731756916917Flushing stores of hbase:meta,,1.1588230740 at 1731756916918 (+1 ms)Flushing 1588230740/info: creating writer at 1731756916919 (+1 ms)Flushing 1588230740/info: appending metadata at 1731756916941 (+22 ms)Flushing 1588230740/info: closing flushed file at 1731756916941Flushing 1588230740/ns: creating writer at 1731756916961 (+20 ms)Flushing 1588230740/ns: appending metadata at 1731756916981 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1731756916981Flushing 1588230740/table: creating writer at 1731756917002 (+21 ms)Flushing 1588230740/table: appending metadata at 1731756917016 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731756917016Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19250f85: reopening flushed file at 1731756917030 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e8a6865: reopening flushed file at 1731756917040 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1aa85945: reopening flushed file at 1731756917049 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 144ms, sequenceid=11, compaction requested=false at 1731756917061 (+12 ms)Writing region close event to WAL at 1731756917062 (+1 ms)Running coprocessor post-close hooks at 1731756917067 (+5 ms)Closed at 1731756917068 (+1 ms) 2024-11-16T11:35:17,068 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T11:35:17,117 INFO [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(976): stopping server a7948fca2832,43973,1731756821555; all regions closed. 
2024-11-16T11:35:17,119 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,119 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,120 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,120 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,120 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741834_1010 (size=3066) 2024-11-16T11:35:17,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741834_1010 (size=3066) 2024-11-16T11:35:17,129 DEBUG [RS:0;a7948fca2832:43973 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/oldWALs 2024-11-16T11:35:17,129 INFO [RS:0;a7948fca2832:43973 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C43973%2C1731756821555.meta:.meta(num 1731756823854) 2024-11-16T11:35:17,130 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,130 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,130 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,130 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,130 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741847_1023 (size=12695) 2024-11-16T11:35:17,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741847_1023 (size=12695) 2024-11-16T11:35:17,137 DEBUG [RS:0;a7948fca2832:43973 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/oldWALs 2024-11-16T11:35:17,137 INFO [RS:0;a7948fca2832:43973 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C43973%2C1731756821555:(num 1731756896775) 2024-11-16T11:35:17,138 DEBUG [RS:0;a7948fca2832:43973 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:35:17,138 INFO [RS:0;a7948fca2832:43973 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T11:35:17,138 INFO [RS:0;a7948fca2832:43973 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T11:35:17,138 INFO [RS:0;a7948fca2832:43973 {}] hbase.ChoreService(370): Chore service for: regionserver/a7948fca2832:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T11:35:17,138 INFO [RS:0;a7948fca2832:43973 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T11:35:17,138 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
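The "Chore service for: regionserver/a7948fca2832:0 had [ScheduledChore name=ReplicationSinkStatistics, ...] on shutdown" line above is the server's ChoreService reporting the periodic chores it still held when stopped. A minimal sketch of that chore mechanism, with illustrative names only (ChoreService and ScheduledChore are internal HBase classes, so treat this as an assumption-laden example rather than a supported API):

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable so the chore can be cancelled cooperatively.
    final boolean[] stopped = {false};
    Stoppable stopper = new Stoppable() {
      @Override public void stop(String why) { stopped[0] = true; }
      @Override public boolean isStopped() { return stopped[0]; }
    };

    ChoreService service = new ChoreService("chore-sketch");
    // A periodic task; 300000 ms matches the period shown for the
    // ReplicationSinkStatistics chore in the log line above.
    ScheduledChore chore = new ScheduledChore("example-chore", stopper, 300000) {
      @Override protected void chore() {
        // periodic work would go here
      }
    };
    service.scheduleChore(chore);

    Thread.sleep(100);
    // Shutting the service down cancels outstanding chores; on the region
    // server this is roughly where the "had [...] on shutdown" message comes from.
    service.shutdown();
  }
}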
2024-11-16T11:35:17,139 INFO [RS:0;a7948fca2832:43973 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43973 2024-11-16T11:35:17,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:35:17,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7948fca2832,43973,1731756821555 2024-11-16T11:35:17,151 INFO [RS:0;a7948fca2832:43973 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T11:35:17,153 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7948fca2832,43973,1731756821555] 2024-11-16T11:35:17,172 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7948fca2832,43973,1731756821555 already deleted, retry=false 2024-11-16T11:35:17,172 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7948fca2832,43973,1731756821555 expired; onlineServers=0 2024-11-16T11:35:17,172 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a7948fca2832,42855,1731756820796' ***** 2024-11-16T11:35:17,172 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T11:35:17,172 INFO [M:0;a7948fca2832:42855 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T11:35:17,172 INFO [M:0;a7948fca2832:42855 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T11:35:17,173 DEBUG [M:0;a7948fca2832:42855 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T11:35:17,173 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T11:35:17,173 DEBUG [M:0;a7948fca2832:42855 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T11:35:17,173 DEBUG [master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731756823010 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731756823010,5,FailOnTimeoutGroup] 2024-11-16T11:35:17,173 DEBUG [master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731756823011 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731756823011,5,FailOnTimeoutGroup] 2024-11-16T11:35:17,173 INFO [M:0;a7948fca2832:42855 {}] hbase.ChoreService(370): Chore service for: master/a7948fca2832:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T11:35:17,173 INFO [M:0;a7948fca2832:42855 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T11:35:17,173 DEBUG [M:0;a7948fca2832:42855 {}] master.HMaster(1795): Stopping service threads 2024-11-16T11:35:17,173 INFO [M:0;a7948fca2832:42855 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T11:35:17,173 INFO [M:0;a7948fca2832:42855 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T11:35:17,174 INFO [M:0;a7948fca2832:42855 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T11:35:17,174 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T11:35:17,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T11:35:17,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:17,183 DEBUG [M:0;a7948fca2832:42855 {}] zookeeper.ZKUtil(347): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T11:35:17,183 WARN [M:0;a7948fca2832:42855 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T11:35:17,184 INFO [M:0;a7948fca2832:42855 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/.lastflushedseqids 2024-11-16T11:35:17,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741854_1030 (size=130) 2024-11-16T11:35:17,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741854_1030 (size=130) 2024-11-16T11:35:17,197 INFO [M:0;a7948fca2832:42855 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T11:35:17,197 INFO [M:0;a7948fca2832:42855 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T11:35:17,197 DEBUG [M:0;a7948fca2832:42855 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T11:35:17,197 INFO [M:0;a7948fca2832:42855 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:35:17,197 DEBUG [M:0;a7948fca2832:42855 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:35:17,197 DEBUG [M:0;a7948fca2832:42855 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T11:35:17,197 DEBUG [M:0;a7948fca2832:42855 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:35:17,198 INFO [M:0;a7948fca2832:42855 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-16T11:35:17,215 DEBUG [M:0;a7948fca2832:42855 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/686ecf1e0031451da0f19b5c4ca2ebde is 82, key is hbase:meta,,1/info:regioninfo/1731756823922/Put/seqid=0 2024-11-16T11:35:17,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741855_1031 (size=5672) 2024-11-16T11:35:17,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741855_1031 (size=5672) 2024-11-16T11:35:17,222 INFO [M:0;a7948fca2832:42855 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/686ecf1e0031451da0f19b5c4ca2ebde 2024-11-16T11:35:17,242 DEBUG [M:0;a7948fca2832:42855 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8090a59ab17843c9b0c9637ab26d6541 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731756824754/Put/seqid=0 2024-11-16T11:35:17,242 INFO [regionserver/a7948fca2832:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T11:35:17,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741856_1032 (size=6247) 2024-11-16T11:35:17,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741856_1032 (size=6247) 2024-11-16T11:35:17,248 INFO [M:0;a7948fca2832:42855 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8090a59ab17843c9b0c9637ab26d6541 2024-11-16T11:35:17,254 INFO [M:0;a7948fca2832:42855 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8090a59ab17843c9b0c9637ab26d6541 2024-11-16T11:35:17,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:35:17,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43973-0x101436c01d80001, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:35:17,263 INFO [RS:0;a7948fca2832:43973 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T11:35:17,263 INFO [RS:0;a7948fca2832:43973 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7948fca2832,43973,1731756821555; zookeeper connection closed. 2024-11-16T11:35:17,263 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@41ccdd7b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@41ccdd7b 2024-11-16T11:35:17,264 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T11:35:17,271 DEBUG [M:0;a7948fca2832:42855 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3fee876c01bd4c1eb45c0f8cd534a014 is 69, key is a7948fca2832,43973,1731756821555/rs:state/1731756823111/Put/seqid=0 2024-11-16T11:35:17,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741857_1033 (size=5156) 2024-11-16T11:35:17,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741857_1033 (size=5156) 2024-11-16T11:35:17,278 INFO [M:0;a7948fca2832:42855 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3fee876c01bd4c1eb45c0f8cd534a014 2024-11-16T11:35:17,299 DEBUG [M:0;a7948fca2832:42855 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1ebada12e46b4cbe8a76d261d37584b0 is 52, key is load_balancer_on/state:d/1731756824243/Put/seqid=0 2024-11-16T11:35:17,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741858_1034 (size=5056) 2024-11-16T11:35:17,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741858_1034 (size=5056) 2024-11-16T11:35:17,305 INFO [M:0;a7948fca2832:42855 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1ebada12e46b4cbe8a76d261d37584b0 2024-11-16T11:35:17,313 DEBUG [M:0;a7948fca2832:42855 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/686ecf1e0031451da0f19b5c4ca2ebde as 
hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/686ecf1e0031451da0f19b5c4ca2ebde 2024-11-16T11:35:17,320 INFO [M:0;a7948fca2832:42855 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/686ecf1e0031451da0f19b5c4ca2ebde, entries=8, sequenceid=59, filesize=5.5 K 2024-11-16T11:35:17,321 DEBUG [M:0;a7948fca2832:42855 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8090a59ab17843c9b0c9637ab26d6541 as hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8090a59ab17843c9b0c9637ab26d6541 2024-11-16T11:35:17,327 INFO [M:0;a7948fca2832:42855 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8090a59ab17843c9b0c9637ab26d6541 2024-11-16T11:35:17,327 INFO [M:0;a7948fca2832:42855 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8090a59ab17843c9b0c9637ab26d6541, entries=6, sequenceid=59, filesize=6.1 K 2024-11-16T11:35:17,329 DEBUG [M:0;a7948fca2832:42855 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3fee876c01bd4c1eb45c0f8cd534a014 as hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3fee876c01bd4c1eb45c0f8cd534a014 2024-11-16T11:35:17,335 INFO [M:0;a7948fca2832:42855 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3fee876c01bd4c1eb45c0f8cd534a014, entries=1, sequenceid=59, filesize=5.0 K 2024-11-16T11:35:17,337 DEBUG [M:0;a7948fca2832:42855 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1ebada12e46b4cbe8a76d261d37584b0 as hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1ebada12e46b4cbe8a76d261d37584b0 2024-11-16T11:35:17,344 INFO [M:0;a7948fca2832:42855 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1ebada12e46b4cbe8a76d261d37584b0, entries=1, sequenceid=59, filesize=4.9 K 2024-11-16T11:35:17,345 INFO [M:0;a7948fca2832:42855 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=59, compaction requested=false 2024-11-16T11:35:17,346 INFO [M:0;a7948fca2832:42855 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T11:35:17,347 DEBUG [M:0;a7948fca2832:42855 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731756917197Disabling compacts and flushes for region at 1731756917197Disabling writes for close at 1731756917197Obtaining lock to block concurrent updates at 1731756917198 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731756917198Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731756917198Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731756917199 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731756917199Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731756917215 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731756917215Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731756917228 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731756917241 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731756917241Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731756917255 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731756917271 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731756917271Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731756917284 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731756917298 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731756917298Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d2c4a64: reopening flushed file at 1731756917312 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ee97095: reopening flushed file at 1731756917320 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63e8cb0b: reopening flushed file at 1731756917328 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e2882c9: reopening flushed file at 1731756917336 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=59, compaction requested=false at 1731756917345 (+9 ms)Writing region close event to WAL at 1731756917346 (+1 ms)Closed at 1731756917346 2024-11-16T11:35:17,347 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,348 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,348 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,348 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,348 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:17,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37779 is added to blk_1073741830_1006 (size=27973) 2024-11-16T11:35:17,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39897 is added to blk_1073741830_1006 (size=27973) 2024-11-16T11:35:17,351 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T11:35:17,351 INFO [M:0;a7948fca2832:42855 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T11:35:17,351 INFO [M:0;a7948fca2832:42855 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42855 2024-11-16T11:35:17,351 INFO [M:0;a7948fca2832:42855 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T11:35:17,461 INFO [M:0;a7948fca2832:42855 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T11:35:17,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:35:17,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x101436c01d80000, quorum=127.0.0.1:56083, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:35:17,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@50b8c1e0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:17,472 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1225e5c2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:35:17,472 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:35:17,472 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1612a852{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:35:17,472 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f51668d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/hadoop.log.dir/,STOPPED} 2024-11-16T11:35:17,475 WARN [BP-412134530-172.17.0.2-1731756816644 heartbeating to localhost/127.0.0.1:39015 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:35:17,475 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T11:35:17,475 WARN [BP-412134530-172.17.0.2-1731756816644 heartbeating to localhost/127.0.0.1:39015 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-412134530-172.17.0.2-1731756816644 (Datanode Uuid 3665947b-5d27-45f3-879f-8ddcaf5550fc) service to localhost/127.0.0.1:39015 2024-11-16T11:35:17,475 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:35:17,476 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/cluster_371b9d0d-aacc-fe33-fd67-ac63869a4143/data/data3/current/BP-412134530-172.17.0.2-1731756816644 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:17,476 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/cluster_371b9d0d-aacc-fe33-fd67-ac63869a4143/data/data4/current/BP-412134530-172.17.0.2-1731756816644 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:17,476 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:35:17,481 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c4805bf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:17,482 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65cf0e3b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:35:17,482 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:35:17,482 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@616d254c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:35:17,482 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21c149f1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/hadoop.log.dir/,STOPPED} 2024-11-16T11:35:17,484 WARN [BP-412134530-172.17.0.2-1731756816644 heartbeating to localhost/127.0.0.1:39015 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:35:17,484 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T11:35:17,484 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:35:17,484 WARN [BP-412134530-172.17.0.2-1731756816644 heartbeating to localhost/127.0.0.1:39015 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-412134530-172.17.0.2-1731756816644 (Datanode Uuid 33cb1a64-49cf-4211-99dc-20553c8ca126) service to localhost/127.0.0.1:39015 2024-11-16T11:35:17,485 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/cluster_371b9d0d-aacc-fe33-fd67-ac63869a4143/data/data1/current/BP-412134530-172.17.0.2-1731756816644 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:17,485 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/cluster_371b9d0d-aacc-fe33-fd67-ac63869a4143/data/data2/current/BP-412134530-172.17.0.2-1731756816644 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:17,485 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:35:17,494 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@595f45d4{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T11:35:17,494 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1298d5a2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:35:17,494 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:35:17,494 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18d8eba1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:35:17,495 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73b23f80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/hadoop.log.dir/,STOPPED} 2024-11-16T11:35:17,505 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T11:35:17,535 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T11:35:17,545 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) 
app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/a7948fca2832:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/a7948fca2832:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39015 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:39015 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:39015 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:39015 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:39015 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39015 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@364af4a3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39015 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/a7948fca2832:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39015 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=215 (was 224), ProcessCount=11 (was 11), AvailableMemoryMB=4968 (was 5445) 2024-11-16T11:35:17,551 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=215, ProcessCount=11, AvailableMemoryMB=4968 2024-11-16T11:35:17,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T11:35:17,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/hadoop.log.dir so I do NOT create it in target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278 2024-11-16T11:35:17,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6135c3ca-2bb3-1b7f-0a84-48504f1a2062/hadoop.tmp.dir so I do NOT create it in target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278 2024-11-16T11:35:17,552 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/cluster_190becb6-4d50-3dc8-1519-b1a346502a64, deleteOnExit=true 2024-11-16T11:35:17,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T11:35:17,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/test.cache.data in system properties and HBase conf 2024-11-16T11:35:17,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T11:35:17,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/hadoop.log.dir in system properties and HBase conf 
2024-11-16T11:35:17,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T11:35:17,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T11:35:17,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T11:35:17,553 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-16T11:35:17,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T11:35:17,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T11:35:17,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T11:35:17,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T11:35:17,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T11:35:17,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T11:35:17,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T11:35:17,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/dfs.journalnode.edits.dir in system 
properties and HBase conf 2024-11-16T11:35:17,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T11:35:17,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/nfs.dump.dir in system properties and HBase conf 2024-11-16T11:35:17,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/java.io.tmpdir in system properties and HBase conf 2024-11-16T11:35:17,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T11:35:17,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T11:35:17,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T11:35:17,568 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T11:35:17,891 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:35:17,898 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:35:17,899 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:35:17,899 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:35:17,899 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T11:35:17,901 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:35:17,904 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@766cfc4e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:35:17,904 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3839647f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:35:18,001 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@68e16ee6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/java.io.tmpdir/jetty-localhost-44531-hadoop-hdfs-3_4_1-tests_jar-_-any-15452413881882825189/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T11:35:18,002 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@65d95c6f{HTTP/1.1, (http/1.1)}{localhost:44531} 2024-11-16T11:35:18,002 INFO [Time-limited test {}] server.Server(415): Started @103224ms 2024-11-16T11:35:18,014 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T11:35:18,337 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:35:18,343 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:35:18,344 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:35:18,344 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:35:18,344 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:35:18,345 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dfbaab6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:35:18,345 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ba968b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:35:18,447 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6cc6305e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/java.io.tmpdir/jetty-localhost-45609-hadoop-hdfs-3_4_1-tests_jar-_-any-14769351493352088027/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:18,448 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@50f2a115{HTTP/1.1, (http/1.1)}{localhost:45609} 2024-11-16T11:35:18,448 INFO [Time-limited test {}] server.Server(415): Started @103670ms 2024-11-16T11:35:18,450 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:35:18,485 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:35:18,490 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:35:18,491 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:35:18,491 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:35:18,491 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T11:35:18,492 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72380cd1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:35:18,493 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22208119{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:35:18,595 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35bbb04a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/java.io.tmpdir/jetty-localhost-33123-hadoop-hdfs-3_4_1-tests_jar-_-any-13860037811580573407/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:18,595 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13a73383{HTTP/1.1, (http/1.1)}{localhost:33123} 2024-11-16T11:35:18,596 INFO [Time-limited test {}] server.Server(415): Started @103817ms 2024-11-16T11:35:18,598 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:35:19,576 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/cluster_190becb6-4d50-3dc8-1519-b1a346502a64/data/data2/current/BP-1135396281-172.17.0.2-1731756917579/current, will proceed with Du for space computation calculation, 2024-11-16T11:35:19,576 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/cluster_190becb6-4d50-3dc8-1519-b1a346502a64/data/data1/current/BP-1135396281-172.17.0.2-1731756917579/current, will proceed with Du for space computation calculation, 2024-11-16T11:35:19,597 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:35:19,601 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x165939ca5e4cc1d9 with lease ID 0xc5b8e852fe5e9732: Processing first storage report for DS-e828f8a5-6bed-4b92-b270-eef3955d2370 from datanode DatanodeRegistration(127.0.0.1:35579, datanodeUuid=d5d73470-e59e-4937-a603-7f664b1a68d0, infoPort=35881, infoSecurePort=0, ipcPort=35147, storageInfo=lv=-57;cid=testClusterID;nsid=1543688367;c=1731756917579) 2024-11-16T11:35:19,601 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x165939ca5e4cc1d9 with lease ID 0xc5b8e852fe5e9732: from storage DS-e828f8a5-6bed-4b92-b270-eef3955d2370 node DatanodeRegistration(127.0.0.1:35579, datanodeUuid=d5d73470-e59e-4937-a603-7f664b1a68d0, infoPort=35881, infoSecurePort=0, ipcPort=35147, storageInfo=lv=-57;cid=testClusterID;nsid=1543688367;c=1731756917579), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T11:35:19,601 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x165939ca5e4cc1d9 with lease ID 0xc5b8e852fe5e9732: Processing first storage report for DS-d9252a6b-2698-45f5-9bd1-c6fc3a85fe5e from datanode DatanodeRegistration(127.0.0.1:35579, datanodeUuid=d5d73470-e59e-4937-a603-7f664b1a68d0, infoPort=35881, infoSecurePort=0, ipcPort=35147, storageInfo=lv=-57;cid=testClusterID;nsid=1543688367;c=1731756917579) 2024-11-16T11:35:19,601 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x165939ca5e4cc1d9 with lease ID 0xc5b8e852fe5e9732: from storage DS-d9252a6b-2698-45f5-9bd1-c6fc3a85fe5e node DatanodeRegistration(127.0.0.1:35579, datanodeUuid=d5d73470-e59e-4937-a603-7f664b1a68d0, infoPort=35881, infoSecurePort=0, ipcPort=35147, storageInfo=lv=-57;cid=testClusterID;nsid=1543688367;c=1731756917579), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:35:19,819 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/cluster_190becb6-4d50-3dc8-1519-b1a346502a64/data/data3/current/BP-1135396281-172.17.0.2-1731756917579/current, will proceed with Du for space computation calculation, 2024-11-16T11:35:19,819 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/cluster_190becb6-4d50-3dc8-1519-b1a346502a64/data/data4/current/BP-1135396281-172.17.0.2-1731756917579/current, will proceed with Du for space computation calculation, 2024-11-16T11:35:19,838 WARN [Thread-440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:35:19,840 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe899bde29e042f01 with lease ID 0xc5b8e852fe5e9733: Processing first storage report for DS-273f321a-277c-459c-93eb-bd81230d0fca from datanode DatanodeRegistration(127.0.0.1:37573, datanodeUuid=30c0e00d-9aeb-41ee-b836-71011c099ade, infoPort=41185, infoSecurePort=0, ipcPort=34539, storageInfo=lv=-57;cid=testClusterID;nsid=1543688367;c=1731756917579) 2024-11-16T11:35:19,840 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe899bde29e042f01 with lease ID 0xc5b8e852fe5e9733: from storage DS-273f321a-277c-459c-93eb-bd81230d0fca node DatanodeRegistration(127.0.0.1:37573, datanodeUuid=30c0e00d-9aeb-41ee-b836-71011c099ade, infoPort=41185, infoSecurePort=0, ipcPort=34539, storageInfo=lv=-57;cid=testClusterID;nsid=1543688367;c=1731756917579), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:35:19,840 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe899bde29e042f01 with lease ID 0xc5b8e852fe5e9733: Processing first storage report for DS-1413f478-d9ba-4111-8e7c-64fe7c881088 from datanode DatanodeRegistration(127.0.0.1:37573, datanodeUuid=30c0e00d-9aeb-41ee-b836-71011c099ade, infoPort=41185, infoSecurePort=0, ipcPort=34539, storageInfo=lv=-57;cid=testClusterID;nsid=1543688367;c=1731756917579) 2024-11-16T11:35:19,840 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe899bde29e042f01 with lease ID 0xc5b8e852fe5e9733: from storage DS-1413f478-d9ba-4111-8e7c-64fe7c881088 node DatanodeRegistration(127.0.0.1:37573, datanodeUuid=30c0e00d-9aeb-41ee-b836-71011c099ade, infoPort=41185, infoSecurePort=0, ipcPort=34539, storageInfo=lv=-57;cid=testClusterID;nsid=1543688367;c=1731756917579), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:35:19,940 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278 2024-11-16T11:35:19,943 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/cluster_190becb6-4d50-3dc8-1519-b1a346502a64/zookeeper_0, clientPort=55918, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/cluster_190becb6-4d50-3dc8-1519-b1a346502a64/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/cluster_190becb6-4d50-3dc8-1519-b1a346502a64/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T11:35:19,944 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55918 2024-11-16T11:35:19,944 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:19,946 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:19,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741825_1001 (size=7) 2024-11-16T11:35:19,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741825_1001 (size=7) 2024-11-16T11:35:19,957 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4 with version=8 2024-11-16T11:35:19,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/hbase-staging 2024-11-16T11:35:19,959 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:35:19,959 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:19,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:19,960 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:35:19,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:19,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:35:19,960 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T11:35:19,960 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:35:19,961 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37457 2024-11-16T11:35:19,963 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37457 connecting to ZooKeeper ensemble=127.0.0.1:55918 2024-11-16T11:35:20,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:374570x0, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T11:35:20,026 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37457-0x101436d88320000 connected 2024-11-16T11:35:20,115 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:20,119 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:20,124 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:35:20,125 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4, hbase.cluster.distributed=false 2024-11-16T11:35:20,128 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:35:20,129 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37457 2024-11-16T11:35:20,129 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37457 2024-11-16T11:35:20,129 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37457 2024-11-16T11:35:20,129 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37457 2024-11-16T11:35:20,130 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37457 2024-11-16T11:35:20,143 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:35:20,144 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:20,144 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:20,144 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:35:20,144 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:20,144 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:35:20,144 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T11:35:20,144 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:35:20,145 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37923 2024-11-16T11:35:20,146 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37923 connecting to ZooKeeper ensemble=127.0.0.1:55918 2024-11-16T11:35:20,147 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:20,149 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:20,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:379230x0, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T11:35:20,164 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379230x0, quorum=127.0.0.1:55918, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:35:20,164 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37923-0x101436d88320001 connected 2024-11-16T11:35:20,165 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T11:35:20,165 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T11:35:20,166 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T11:35:20,167 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:35:20,168 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37923 2024-11-16T11:35:20,168 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37923 2024-11-16T11:35:20,171 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37923 2024-11-16T11:35:20,172 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37923 2024-11-16T11:35:20,172 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37923 2024-11-16T11:35:20,183 DEBUG [M:0;a7948fca2832:37457 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a7948fca2832:37457 2024-11-16T11:35:20,183 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a7948fca2832,37457,1731756919959 2024-11-16T11:35:20,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:35:20,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:35:20,196 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/a7948fca2832,37457,1731756919959 2024-11-16T11:35:20,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:20,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T11:35:20,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:20,208 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T11:35:20,209 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a7948fca2832,37457,1731756919959 from backup master directory 2024-11-16T11:35:20,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:35:20,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a7948fca2832,37457,1731756919959 2024-11-16T11:35:20,218 WARN [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
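[Editor's note on the RpcExecutor lines above ("Instantiated default.FPBQ.Fifo ... handlerCount=3", "priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2"): these pool sizes come from the server's call-queue configuration. Below is a minimal sketch of the commonly documented keys involved; the values are illustrative and are not necessarily what this test harness sets.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcQueueConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Total RPC handler threads per server (the test cluster above runs with only 3).
    conf.setInt("hbase.regionserver.handler.count", 3);
    // Fraction of handlers that get their own call queue (0 => one shared FIFO queue).
    conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.1f);
    // Split priority handlers into read/write pools, as in the RWQ.Fifo line above.
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
    conf.setFloat("hbase.ipc.server.callqueue.scan.ratio", 0.0f);
    System.out.println("handlers=" + conf.getInt("hbase.regionserver.handler.count", 30));
  }
}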
2024-11-16T11:35:20,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:35:20,219 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a7948fca2832,37457,1731756919959 2024-11-16T11:35:20,226 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/hbase.id] with ID: e036b658-ca35-4b31-8178-9be9883f9625 2024-11-16T11:35:20,226 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/.tmp/hbase.id 2024-11-16T11:35:20,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741826_1002 (size=42) 2024-11-16T11:35:20,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741826_1002 (size=42) 2024-11-16T11:35:20,236 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/.tmp/hbase.id]:[hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/hbase.id] 2024-11-16T11:35:20,251 INFO [master/a7948fca2832:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:20,251 INFO [master/a7948fca2832:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T11:35:20,252 INFO [master/a7948fca2832:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
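[Editor's note: the MiniZooKeeperCluster start, version file and hbase.id writes logged above are what HBaseTestingUtil performs when a test requests a full mini cluster. A minimal sketch follows, assuming the branch-3 class name shown in the log and the usual startMiniCluster()/shutdownMiniCluster()/getConfiguration() methods; on 2.x the equivalent class is HBaseTestingUtility.]

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Spins up HDFS, an embedded ZooKeeper quorum and HBase, producing logs like the ones above.
    util.startMiniCluster();
    try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration())) {
      // The returned configuration already points at the embedded quorum and hbase.rootdir.
      System.out.println("rootdir=" + util.getConfiguration().get("hbase.rootdir"));
    } finally {
      util.shutdownMiniCluster();
    }
  }
}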
2024-11-16T11:35:20,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:20,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:20,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741827_1003 (size=196) 2024-11-16T11:35:20,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741827_1003 (size=196) 2024-11-16T11:35:20,268 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T11:35:20,269 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T11:35:20,270 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:35:20,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741828_1004 (size=1189) 2024-11-16T11:35:20,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741828_1004 (size=1189) 2024-11-16T11:35:20,280 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store 2024-11-16T11:35:20,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:35:20,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:35:20,288 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:35:20,288 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T11:35:20,288 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:35:20,289 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:35:20,289 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T11:35:20,289 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:35:20,289 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
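[Editor's note: the master:store descriptor printed above (an in-memory 'info' family with ROW_INDEX_V1 encoding, ROWCOL blooms and 8 KB blocks, plus single-version 'proc'/'rs'/'state' families) can be expressed with the public client API. A minimal sketch using TableDescriptorBuilder/ColumnFamilyDescriptorBuilder; the table name 'demo' is hypothetical and only two of the four families are shown.]

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                    // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)                              // BLOCKSIZE => '65536 B (64KB)'
            .build())
        .build();
    System.out.println(td); // against a live cluster this descriptor would be passed to Admin.createTable(td)
  }
}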
2024-11-16T11:35:20,289 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731756920288Disabling compacts and flushes for region at 1731756920288Disabling writes for close at 1731756920289 (+1 ms)Writing region close event to WAL at 1731756920289Closed at 1731756920289 2024-11-16T11:35:20,290 WARN [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/.initializing 2024-11-16T11:35:20,290 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/WALs/a7948fca2832,37457,1731756919959 2024-11-16T11:35:20,294 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C37457%2C1731756919959, suffix=, logDir=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/WALs/a7948fca2832,37457,1731756919959, archiveDir=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/oldWALs, maxLogs=10 2024-11-16T11:35:20,294 INFO [master/a7948fca2832:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C37457%2C1731756919959.1731756920294 2024-11-16T11:35:20,300 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/WALs/a7948fca2832,37457,1731756919959/a7948fca2832%2C37457%2C1731756919959.1731756920294 2024-11-16T11:35:20,302 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41185:41185),(127.0.0.1/127.0.0.1:35881:35881)] 2024-11-16T11:35:20,303 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:35:20,303 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:35:20,304 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:20,304 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:20,306 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:20,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T11:35:20,309 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:20,309 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:20,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:20,312 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T11:35:20,312 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:20,313 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:35:20,313 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:20,316 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T11:35:20,316 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:20,317 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:35:20,317 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:20,319 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T11:35:20,319 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:20,319 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:35:20,319 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:20,321 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:20,322 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:20,324 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:20,324 DEBUG [master/a7948fca2832:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:20,325 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T11:35:20,327 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:20,331 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:35:20,332 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=784007, jitterRate=-0.003084540367126465}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T11:35:20,333 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731756920304Initializing all the Stores at 1731756920305 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756920305Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756920306 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756920306Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756920306Cleaning up temporary data from old regions at 1731756920324 (+18 ms)Region opened successfully at 1731756920333 (+9 ms) 2024-11-16T11:35:20,333 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T11:35:20,338 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ab13c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:35:20,339 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T11:35:20,339 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T11:35:20,339 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T11:35:20,340 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T11:35:20,340 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T11:35:20,341 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T11:35:20,341 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T11:35:20,344 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T11:35:20,345 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T11:35:20,355 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T11:35:20,355 INFO [master/a7948fca2832:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T11:35:20,356 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T11:35:20,365 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T11:35:20,366 INFO [master/a7948fca2832:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T11:35:20,367 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T11:35:20,376 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T11:35:20,377 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T11:35:20,386 DEBUG 
[master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T11:35:20,389 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T11:35:20,397 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T11:35:20,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T11:35:20,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T11:35:20,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:20,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:20,408 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a7948fca2832,37457,1731756919959, sessionid=0x101436d88320000, setting cluster-up flag (Was=false) 2024-11-16T11:35:20,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:20,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:20,460 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T11:35:20,463 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7948fca2832,37457,1731756919959 2024-11-16T11:35:20,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:20,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:20,513 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T11:35:20,517 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7948fca2832,37457,1731756919959 2024-11-16T11:35:20,520 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T11:35:20,524 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T11:35:20,524 INFO [master/a7948fca2832:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T11:35:20,525 INFO [master/a7948fca2832:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T11:35:20,525 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a7948fca2832,37457,1731756919959 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T11:35:20,527 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:35:20,527 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:35:20,528 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:35:20,528 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:35:20,528 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a7948fca2832:0, corePoolSize=10, maxPoolSize=10 2024-11-16T11:35:20,528 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:20,528 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:35:20,528 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a7948fca2832:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T11:35:20,529 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731756950529 2024-11-16T11:35:20,530 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T11:35:20,530 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T11:35:20,530 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T11:35:20,530 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T11:35:20,530 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T11:35:20,530 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T11:35:20,531 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:20,531 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:35:20,531 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T11:35:20,531 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T11:35:20,532 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T11:35:20,532 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T11:35:20,532 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T11:35:20,532 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T11:35:20,532 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731756920532,5,FailOnTimeoutGroup] 2024-11-16T11:35:20,532 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:20,533 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731756920532,5,FailOnTimeoutGroup] 2024-11-16T11:35:20,533 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
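
The cleaner initialization lines above (TimeToLiveLogCleaner, ReplicationLogCleaner, TimeToLiveHFileCleaner, HFileLinkCleaner, SnapshotHFileCleaner, and the LogsCleaner/HFileCleaner chores) come from pluggable delegates that are selected through configuration. A minimal sketch of how a deployment might tune them, assuming the standard hbase-site.xml keys (hbase.master.logcleaner.plugins, hbase.master.hfilecleaner.plugins, hbase.master.cleaner.interval); the values shown are illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerChoreConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Comma-separated delegates run by the LogsCleaner chore (old WALs).
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner");
        // Delegates run by the HFileCleaner chore (archived store files).
        conf.set("hbase.master.hfilecleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
                + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner");
        // Chore interval; the log above shows period=600000 ms (10 minutes).
        conf.setInt("hbase.master.cleaner.interval", 600000);
        return conf;
      }
    }
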
2024-11-16T11:35:20,533 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T11:35:20,533 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:20,533 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T11:35:20,533 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
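
The hbase:meta descriptor printed above is assembled internally by FSTableDescriptors, but user tables are declared with the same attributes through the public client API. A minimal sketch that mirrors a few of the 'info' family settings from the log (3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks); the table name here is illustrative, not from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableDescriptorSketch {
      public static TableDescriptor example() {
        // Same knobs as the meta 'info' family logged above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(info)
            .build();
      }
    }
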
2024-11-16T11:35:20,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:35:20,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:35:20,541 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T11:35:20,542 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4 2024-11-16T11:35:20,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741832_1008 (size=32) 2024-11-16T11:35:20,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741832_1008 (size=32) 2024-11-16T11:35:20,550 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:35:20,552 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T11:35:20,554 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T11:35:20,554 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:20,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:20,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T11:35:20,556 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T11:35:20,556 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:20,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:20,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T11:35:20,559 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T11:35:20,559 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:20,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:20,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T11:35:20,562 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T11:35:20,562 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:20,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:20,562 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T11:35:20,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/data/hbase/meta/1588230740 2024-11-16T11:35:20,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/data/hbase/meta/1588230740 2024-11-16T11:35:20,565 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T11:35:20,565 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T11:35:20,565 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
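
The FlushLargeStoresPolicy line above falls back to memstore-flush-size divided by the number of column families because no explicit per-family lower bound is set on hbase:meta. For a user table that needs a fixed threshold, the property quoted in that log line can be set directly in the table descriptor; a sketch, with the 16 MB value chosen only to match the flushSizeLowerBound=16777216 that the policy derived here:

    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
      public static TableDescriptor withLowerBound(TableDescriptor base) {
        // Property name taken from the FlushLargeStoresPolicy log line above.
        return TableDescriptorBuilder.newBuilder(base)
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
      }
    }
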
2024-11-16T11:35:20,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T11:35:20,568 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:35:20,569 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727265, jitterRate=-0.07523511350154877}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T11:35:20,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731756920550Initializing all the Stores at 1731756920551 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756920551Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756920552 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756920552Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756920552Cleaning up temporary data from old regions at 1731756920565 (+13 ms)Region opened successfully at 1731756920570 (+5 ms) 2024-11-16T11:35:20,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T11:35:20,570 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T11:35:20,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T11:35:20,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T11:35:20,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T11:35:20,571 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T11:35:20,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731756920570Disabling compacts and flushes for region at 1731756920570Disabling writes for close at 1731756920570Writing 
region close event to WAL at 1731756920571 (+1 ms)Closed at 1731756920571 2024-11-16T11:35:20,572 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:35:20,572 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T11:35:20,572 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T11:35:20,574 INFO [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(746): ClusterId : e036b658-ca35-4b31-8178-9be9883f9625 2024-11-16T11:35:20,574 DEBUG [RS:0;a7948fca2832:37923 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T11:35:20,574 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T11:35:20,576 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T11:35:20,588 DEBUG [RS:0;a7948fca2832:37923 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T11:35:20,588 DEBUG [RS:0;a7948fca2832:37923 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T11:35:20,598 DEBUG [RS:0;a7948fca2832:37923 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T11:35:20,598 DEBUG [RS:0;a7948fca2832:37923 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70b907de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:35:20,609 DEBUG [RS:0;a7948fca2832:37923 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a7948fca2832:37923 2024-11-16T11:35:20,609 INFO [RS:0;a7948fca2832:37923 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T11:35:20,609 INFO [RS:0;a7948fca2832:37923 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T11:35:20,609 DEBUG [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T11:35:20,610 INFO [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7948fca2832,37457,1731756919959 with port=37923, startcode=1731756920143 2024-11-16T11:35:20,611 DEBUG [RS:0;a7948fca2832:37923 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T11:35:20,613 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46243, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T11:35:20,614 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37457 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7948fca2832,37923,1731756920143 2024-11-16T11:35:20,614 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37457 {}] master.ServerManager(517): Registering regionserver=a7948fca2832,37923,1731756920143 2024-11-16T11:35:20,616 DEBUG [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4 2024-11-16T11:35:20,616 DEBUG [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39931 2024-11-16T11:35:20,616 DEBUG [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T11:35:20,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:35:20,629 DEBUG [RS:0;a7948fca2832:37923 {}] zookeeper.ZKUtil(111): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7948fca2832,37923,1731756920143 2024-11-16T11:35:20,629 WARN [RS:0;a7948fca2832:37923 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T11:35:20,629 INFO [RS:0;a7948fca2832:37923 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:35:20,630 DEBUG [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/WALs/a7948fca2832,37923,1731756920143 2024-11-16T11:35:20,630 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7948fca2832,37923,1731756920143] 2024-11-16T11:35:20,634 INFO [RS:0;a7948fca2832:37923 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T11:35:20,636 INFO [RS:0;a7948fca2832:37923 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T11:35:20,636 INFO [RS:0;a7948fca2832:37923 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T11:35:20,636 INFO [RS:0;a7948fca2832:37923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
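
The MemStoreFlusher line above derives its limits from heap-fraction settings: the low-water mark of 836 MB is 95% of the 880 MB global limit. A sketch of the two knobs, assuming the standard keys hbase.regionserver.global.memstore.size and hbase.regionserver.global.memstore.size.lower.limit; the values written here are the usual defaults, shown for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of region server heap shared by all memstores (global limit).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of the global limit;
        // 0.95 * 880 MB is roughly the 836 MB reported above.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
      }
    }
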
2024-11-16T11:35:20,639 INFO [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T11:35:20,640 INFO [RS:0;a7948fca2832:37923 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T11:35:20,640 INFO [RS:0;a7948fca2832:37923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:20,640 DEBUG [RS:0;a7948fca2832:37923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:20,640 DEBUG [RS:0;a7948fca2832:37923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:20,640 DEBUG [RS:0;a7948fca2832:37923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:20,640 DEBUG [RS:0;a7948fca2832:37923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:20,640 DEBUG [RS:0;a7948fca2832:37923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:20,641 DEBUG [RS:0;a7948fca2832:37923 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:35:20,641 DEBUG [RS:0;a7948fca2832:37923 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:20,641 DEBUG [RS:0;a7948fca2832:37923 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:20,641 DEBUG [RS:0;a7948fca2832:37923 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:20,641 DEBUG [RS:0;a7948fca2832:37923 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:20,641 DEBUG [RS:0;a7948fca2832:37923 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:20,641 DEBUG [RS:0;a7948fca2832:37923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:20,641 DEBUG [RS:0;a7948fca2832:37923 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:35:20,641 DEBUG [RS:0;a7948fca2832:37923 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:35:20,642 INFO [RS:0;a7948fca2832:37923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
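
Each "Chore ScheduledChore name=... is enabled" line in this stretch corresponds to a periodic task registered with a ChoreService. A hedged sketch of what such a chore looks like using the public ScheduledChore/ChoreService classes; the pool prefix, chore name, period, and body are hypothetical:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void schedule(Stoppable stopper) {
        // A ChoreService backs the "ScheduledChore ... is enabled" lines above.
        ChoreService choreService = new ChoreService("example-chore-pool");
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60_000) {
          @Override
          protected void chore() {
            // Periodic work goes here; real chores flush, clean, or report metrics.
          }
        };
        choreService.scheduleChore(chore);
      }
    }
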
2024-11-16T11:35:20,642 INFO [RS:0;a7948fca2832:37923 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:20,642 INFO [RS:0;a7948fca2832:37923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:20,642 INFO [RS:0;a7948fca2832:37923 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:20,642 INFO [RS:0;a7948fca2832:37923 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:20,642 INFO [RS:0;a7948fca2832:37923 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,37923,1731756920143-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T11:35:20,657 INFO [RS:0;a7948fca2832:37923 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T11:35:20,658 INFO [RS:0;a7948fca2832:37923 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,37923,1731756920143-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:20,658 INFO [RS:0;a7948fca2832:37923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:20,658 INFO [RS:0;a7948fca2832:37923 {}] regionserver.Replication(171): a7948fca2832,37923,1731756920143 started 2024-11-16T11:35:20,672 INFO [RS:0;a7948fca2832:37923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:20,672 INFO [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(1482): Serving as a7948fca2832,37923,1731756920143, RpcServer on a7948fca2832/172.17.0.2:37923, sessionid=0x101436d88320001 2024-11-16T11:35:20,673 DEBUG [RS:0;a7948fca2832:37923 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T11:35:20,673 DEBUG [RS:0;a7948fca2832:37923 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7948fca2832,37923,1731756920143 2024-11-16T11:35:20,673 DEBUG [RS:0;a7948fca2832:37923 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,37923,1731756920143' 2024-11-16T11:35:20,673 DEBUG [RS:0;a7948fca2832:37923 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T11:35:20,673 DEBUG [RS:0;a7948fca2832:37923 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T11:35:20,674 DEBUG [RS:0;a7948fca2832:37923 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T11:35:20,674 DEBUG [RS:0;a7948fca2832:37923 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T11:35:20,674 DEBUG [RS:0;a7948fca2832:37923 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a7948fca2832,37923,1731756920143 2024-11-16T11:35:20,674 DEBUG [RS:0;a7948fca2832:37923 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,37923,1731756920143' 2024-11-16T11:35:20,674 DEBUG [RS:0;a7948fca2832:37923 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T11:35:20,675 DEBUG 
[RS:0;a7948fca2832:37923 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T11:35:20,675 DEBUG [RS:0;a7948fca2832:37923 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T11:35:20,675 INFO [RS:0;a7948fca2832:37923 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T11:35:20,675 INFO [RS:0;a7948fca2832:37923 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T11:35:20,726 WARN [a7948fca2832:37457 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T11:35:20,780 INFO [RS:0;a7948fca2832:37923 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C37923%2C1731756920143, suffix=, logDir=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/WALs/a7948fca2832,37923,1731756920143, archiveDir=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/oldWALs, maxLogs=32 2024-11-16T11:35:20,784 INFO [RS:0;a7948fca2832:37923 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C37923%2C1731756920143.1731756920784 2024-11-16T11:35:20,794 INFO [RS:0;a7948fca2832:37923 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/WALs/a7948fca2832,37923,1731756920143/a7948fca2832%2C37923%2C1731756920143.1731756920784 2024-11-16T11:35:20,796 DEBUG [RS:0;a7948fca2832:37923 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35881:35881),(127.0.0.1/127.0.0.1:41185:41185)] 2024-11-16T11:35:20,976 DEBUG [a7948fca2832:37457 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T11:35:20,977 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a7948fca2832,37923,1731756920143 2024-11-16T11:35:20,979 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7948fca2832,37923,1731756920143, state=OPENING 2024-11-16T11:35:21,029 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T11:35:21,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:21,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:21,040 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:35:21,040 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T11:35:21,040 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:35:21,040 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a7948fca2832,37923,1731756920143}] 2024-11-16T11:35:21,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T11:35:21,138 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T11:35:21,141 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-16T11:35:21,194 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T11:35:21,197 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48305, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T11:35:21,203 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T11:35:21,204 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:35:21,207 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C37923%2C1731756920143.meta, suffix=.meta, logDir=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/WALs/a7948fca2832,37923,1731756920143, archiveDir=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/oldWALs, maxLogs=32 2024-11-16T11:35:21,209 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C37923%2C1731756920143.meta.1731756921209.meta 2024-11-16T11:35:21,216 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/WALs/a7948fca2832,37923,1731756920143/a7948fca2832%2C37923%2C1731756920143.meta.1731756921209.meta 2024-11-16T11:35:21,216 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35881:35881),(127.0.0.1/127.0.0.1:41185:41185)] 2024-11-16T11:35:21,217 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:35:21,218 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T11:35:21,218 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered 
coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T11:35:21,218 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-16T11:35:21,218 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T11:35:21,218 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:35:21,218 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T11:35:21,218 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T11:35:21,220 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T11:35:21,221 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T11:35:21,221 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:21,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:21,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T11:35:21,223 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T11:35:21,223 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:21,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:21,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T11:35:21,224 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T11:35:21,224 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:21,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:21,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T11:35:21,226 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T11:35:21,226 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:21,227 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:21,227 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T11:35:21,228 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/data/hbase/meta/1588230740 2024-11-16T11:35:21,229 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/data/hbase/meta/1588230740 2024-11-16T11:35:21,230 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T11:35:21,230 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T11:35:21,231 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T11:35:21,233 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T11:35:21,234 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=736029, jitterRate=-0.0640910416841507}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T11:35:21,234 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T11:35:21,235 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731756921218Writing region info on filesystem at 1731756921218Initializing all the Stores at 1731756921219 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756921219Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1731756921220 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756921220Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756921220Cleaning up temporary data from old regions at 1731756921230 (+10 ms)Running coprocessor post-open hooks at 1731756921234 (+4 ms)Region opened successfully at 1731756921235 (+1 ms) 2024-11-16T11:35:21,236 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731756921193 2024-11-16T11:35:21,238 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T11:35:21,239 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T11:35:21,240 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a7948fca2832,37923,1731756920143 2024-11-16T11:35:21,241 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7948fca2832,37923,1731756920143, state=OPEN 2024-11-16T11:35:21,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T11:35:21,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T11:35:21,300 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a7948fca2832,37923,1731756920143 2024-11-16T11:35:21,300 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:35:21,300 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:35:21,306 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T11:35:21,306 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a7948fca2832,37923,1731756920143 in 260 msec 2024-11-16T11:35:21,312 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=2, resume processing ppid=1 2024-11-16T11:35:21,312 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 734 msec 2024-11-16T11:35:21,314 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:35:21,314 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T11:35:21,316 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T11:35:21,316 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7948fca2832,37923,1731756920143, seqNum=-1] 2024-11-16T11:35:21,317 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T11:35:21,319 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41697, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T11:35:21,328 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 804 msec 2024-11-16T11:35:21,328 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731756921328, completionTime=-1 2024-11-16T11:35:21,328 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T11:35:21,328 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T11:35:21,330 INFO [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T11:35:21,330 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731756981330 2024-11-16T11:35:21,330 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731757041330 2024-11-16T11:35:21,330 INFO [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T11:35:21,331 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,37457,1731756919959-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:21,331 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,37457,1731756919959-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 
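
The InitMetaProcedure above creates the built-in 'default' and 'hbase' namespaces as its final step. User namespaces are created the same way through the Admin API; a minimal sketch, with the namespace name being illustrative only:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn =
                 ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // For a user namespace, the equivalent of what InitMetaProcedure
          // does above for 'default' and 'hbase'.
          admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
        }
      }
    }
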
2024-11-16T11:35:21,331 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,37457,1731756919959-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:21,331 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a7948fca2832:37457, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:21,331 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:21,331 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:21,333 DEBUG [master/a7948fca2832:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T11:35:21,336 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.117sec 2024-11-16T11:35:21,336 INFO [master/a7948fca2832:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T11:35:21,337 INFO [master/a7948fca2832:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T11:35:21,337 INFO [master/a7948fca2832:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T11:35:21,337 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T11:35:21,337 INFO [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T11:35:21,337 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,37457,1731756919959-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T11:35:21,337 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,37457,1731756919959-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T11:35:21,339 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T11:35:21,339 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T11:35:21,340 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,37457,1731756919959-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T11:35:21,375 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@709ecdab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:35:21,375 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a7948fca2832,37457,-1 for getting cluster id 2024-11-16T11:35:21,375 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T11:35:21,378 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e036b658-ca35-4b31-8178-9be9883f9625' 2024-11-16T11:35:21,379 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T11:35:21,379 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e036b658-ca35-4b31-8178-9be9883f9625" 2024-11-16T11:35:21,379 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d34ab95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:35:21,380 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a7948fca2832,37457,-1] 2024-11-16T11:35:21,380 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T11:35:21,380 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:35:21,383 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56266, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T11:35:21,385 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e9a9a32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:35:21,385 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T11:35:21,387 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7948fca2832,37923,1731756920143, seqNum=-1] 2024-11-16T11:35:21,388 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T11:35:21,390 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50906, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T11:35:21,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a7948fca2832,37457,1731756919959 2024-11-16T11:35:21,394 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:21,398 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T11:35:21,398 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T11:35:21,399 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T11:35:21,399 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:35:21,399 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:35:21,399 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:35:21,399 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T11:35:21,400 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T11:35:21,400 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1035653102, stopped=false 2024-11-16T11:35:21,400 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a7948fca2832,37457,1731756919959 2024-11-16T11:35:21,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T11:35:21,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T11:35:21,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:21,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:21,418 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T11:35:21,418 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T11:35:21,419 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:35:21,419 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:35:21,419 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:35:21,419 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:35:21,419 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a7948fca2832,37923,1731756920143' ***** 2024-11-16T11:35:21,419 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T11:35:21,419 INFO [RS:0;a7948fca2832:37923 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T11:35:21,419 INFO [RS:0;a7948fca2832:37923 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T11:35:21,419 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T11:35:21,419 INFO [RS:0;a7948fca2832:37923 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T11:35:21,420 INFO [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(959): stopping server a7948fca2832,37923,1731756920143 2024-11-16T11:35:21,420 INFO [RS:0;a7948fca2832:37923 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T11:35:21,420 INFO [RS:0;a7948fca2832:37923 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a7948fca2832:37923. 2024-11-16T11:35:21,420 DEBUG [RS:0;a7948fca2832:37923 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:35:21,420 DEBUG [RS:0;a7948fca2832:37923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:35:21,420 INFO [RS:0;a7948fca2832:37923 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-16T11:35:21,420 INFO [RS:0;a7948fca2832:37923 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T11:35:21,420 INFO [RS:0;a7948fca2832:37923 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T11:35:21,420 INFO [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T11:35:21,420 INFO [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-16T11:35:21,420 DEBUG [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-16T11:35:21,420 DEBUG [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T11:35:21,421 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T11:35:21,421 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T11:35:21,421 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T11:35:21,421 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T11:35:21,421 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T11:35:21,421 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-16T11:35:21,437 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/data/hbase/meta/1588230740/.tmp/ns/a6b545495aac4991830bde0f1746f5ff is 43, key is default/ns:d/1731756921320/Put/seqid=0 2024-11-16T11:35:21,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741835_1011 (size=5153) 2024-11-16T11:35:21,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741835_1011 (size=5153) 2024-11-16T11:35:21,444 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/data/hbase/meta/1588230740/.tmp/ns/a6b545495aac4991830bde0f1746f5ff 2024-11-16T11:35:21,452 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/data/hbase/meta/1588230740/.tmp/ns/a6b545495aac4991830bde0f1746f5ff as hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/data/hbase/meta/1588230740/ns/a6b545495aac4991830bde0f1746f5ff 2024-11-16T11:35:21,460 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/data/hbase/meta/1588230740/ns/a6b545495aac4991830bde0f1746f5ff, entries=2, sequenceid=6, filesize=5.0 K 2024-11-16T11:35:21,461 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 40ms, sequenceid=6, compaction requested=false 2024-11-16T11:35:21,461 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T11:35:21,467 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T11:35:21,467 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T11:35:21,467 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T11:35:21,467 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731756921420Running coprocessor pre-close hooks at 1731756921420Disabling compacts and flushes for region at 1731756921421 (+1 ms)Disabling writes for close at 1731756921421Obtaining lock to block concurrent updates at 1731756921421Preparing flush snapshotting stores in 1588230740 at 1731756921421Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731756921421Flushing stores of hbase:meta,,1.1588230740 at 1731756921422 (+1 ms)Flushing 1588230740/ns: creating writer at 1731756921422Flushing 1588230740/ns: appending metadata at 1731756921436 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731756921436Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1dd67c56: reopening flushed file at 1731756921451 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 40ms, sequenceid=6, compaction requested=false at 1731756921461 (+10 ms)Writing region close event to WAL at 1731756921462 (+1 ms)Running coprocessor post-close hooks at 1731756921467 (+5 ms)Closed at 1731756921467 2024-11-16T11:35:21,468 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T11:35:21,621 INFO [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(976): stopping server a7948fca2832,37923,1731756920143; all regions closed. 
2024-11-16T11:35:21,621 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,622 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,622 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,622 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,622 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741834_1010 (size=1152) 2024-11-16T11:35:21,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741834_1010 (size=1152) 2024-11-16T11:35:21,627 DEBUG [RS:0;a7948fca2832:37923 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/oldWALs 2024-11-16T11:35:21,627 INFO [RS:0;a7948fca2832:37923 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C37923%2C1731756920143.meta:.meta(num 1731756921209) 2024-11-16T11:35:21,628 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,628 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,628 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,628 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,628 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741833_1009 (size=93) 2024-11-16T11:35:21,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741833_1009 (size=93) 2024-11-16T11:35:21,633 DEBUG [RS:0;a7948fca2832:37923 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/oldWALs 2024-11-16T11:35:21,633 INFO [RS:0;a7948fca2832:37923 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C37923%2C1731756920143:(num 1731756920784) 2024-11-16T11:35:21,633 DEBUG [RS:0;a7948fca2832:37923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:35:21,633 INFO [RS:0;a7948fca2832:37923 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T11:35:21,633 INFO [RS:0;a7948fca2832:37923 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T11:35:21,633 INFO [RS:0;a7948fca2832:37923 {}] hbase.ChoreService(370): Chore service for: regionserver/a7948fca2832:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T11:35:21,633 INFO [RS:0;a7948fca2832:37923 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T11:35:21,633 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T11:35:21,634 INFO [RS:0;a7948fca2832:37923 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37923 2024-11-16T11:35:21,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:35:21,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7948fca2832,37923,1731756920143 2024-11-16T11:35:21,660 INFO [RS:0;a7948fca2832:37923 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T11:35:21,671 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7948fca2832,37923,1731756920143] 2024-11-16T11:35:21,681 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7948fca2832,37923,1731756920143 already deleted, retry=false 2024-11-16T11:35:21,681 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7948fca2832,37923,1731756920143 expired; onlineServers=0 2024-11-16T11:35:21,681 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a7948fca2832,37457,1731756919959' ***** 2024-11-16T11:35:21,681 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T11:35:21,681 INFO [M:0;a7948fca2832:37457 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T11:35:21,681 INFO [M:0;a7948fca2832:37457 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T11:35:21,681 DEBUG [M:0;a7948fca2832:37457 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T11:35:21,682 DEBUG [M:0;a7948fca2832:37457 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T11:35:21,682 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T11:35:21,682 DEBUG [master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731756920532 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731756920532,5,FailOnTimeoutGroup] 2024-11-16T11:35:21,682 DEBUG [master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731756920532 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731756920532,5,FailOnTimeoutGroup] 2024-11-16T11:35:21,682 INFO [M:0;a7948fca2832:37457 {}] hbase.ChoreService(370): Chore service for: master/a7948fca2832:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T11:35:21,682 INFO [M:0;a7948fca2832:37457 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T11:35:21,682 DEBUG [M:0;a7948fca2832:37457 {}] master.HMaster(1795): Stopping service threads 2024-11-16T11:35:21,682 INFO [M:0;a7948fca2832:37457 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T11:35:21,682 INFO [M:0;a7948fca2832:37457 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T11:35:21,683 INFO [M:0;a7948fca2832:37457 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T11:35:21,683 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T11:35:21,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T11:35:21,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:21,692 DEBUG [M:0;a7948fca2832:37457 {}] zookeeper.ZKUtil(347): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T11:35:21,692 WARN [M:0;a7948fca2832:37457 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T11:35:21,693 INFO [M:0;a7948fca2832:37457 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/.lastflushedseqids 2024-11-16T11:35:21,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741836_1012 (size=99) 2024-11-16T11:35:21,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741836_1012 (size=99) 2024-11-16T11:35:21,703 INFO [M:0;a7948fca2832:37457 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T11:35:21,703 INFO [M:0;a7948fca2832:37457 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T11:35:21,703 DEBUG [M:0;a7948fca2832:37457 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T11:35:21,703 INFO [M:0;a7948fca2832:37457 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:35:21,704 DEBUG [M:0;a7948fca2832:37457 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:35:21,704 DEBUG [M:0;a7948fca2832:37457 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T11:35:21,704 DEBUG [M:0;a7948fca2832:37457 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:35:21,704 INFO [M:0;a7948fca2832:37457 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-16T11:35:21,719 DEBUG [M:0;a7948fca2832:37457 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b8b101d8d30944209cb2cc2d74b193ee is 82, key is hbase:meta,,1/info:regioninfo/1731756921239/Put/seqid=0 2024-11-16T11:35:21,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741837_1013 (size=5672) 2024-11-16T11:35:21,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741837_1013 (size=5672) 2024-11-16T11:35:21,726 INFO [M:0;a7948fca2832:37457 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b8b101d8d30944209cb2cc2d74b193ee 2024-11-16T11:35:21,748 DEBUG [M:0;a7948fca2832:37457 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2c92ef224be14857b60719e93a3d5791 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731756921327/Put/seqid=0 2024-11-16T11:35:21,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741838_1014 (size=5275) 2024-11-16T11:35:21,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741838_1014 (size=5275) 2024-11-16T11:35:21,754 INFO [M:0;a7948fca2832:37457 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2c92ef224be14857b60719e93a3d5791 2024-11-16T11:35:21,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:35:21,771 INFO [RS:0;a7948fca2832:37923 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T11:35:21,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37923-0x101436d88320001, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-16T11:35:21,771 INFO [RS:0;a7948fca2832:37923 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7948fca2832,37923,1731756920143; zookeeper connection closed. 2024-11-16T11:35:21,771 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@108f945b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@108f945b 2024-11-16T11:35:21,771 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T11:35:21,775 DEBUG [M:0;a7948fca2832:37457 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/019d829376004ca58eaba7974e9a29f1 is 69, key is a7948fca2832,37923,1731756920143/rs:state/1731756920614/Put/seqid=0 2024-11-16T11:35:21,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741839_1015 (size=5156) 2024-11-16T11:35:21,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741839_1015 (size=5156) 2024-11-16T11:35:21,781 INFO [M:0;a7948fca2832:37457 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/019d829376004ca58eaba7974e9a29f1 2024-11-16T11:35:21,802 DEBUG [M:0;a7948fca2832:37457 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d90ca39299b0413eae21ca6c7f65bc16 is 52, key is load_balancer_on/state:d/1731756921397/Put/seqid=0 2024-11-16T11:35:21,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741840_1016 (size=5056) 2024-11-16T11:35:21,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741840_1016 (size=5056) 2024-11-16T11:35:21,808 INFO [M:0;a7948fca2832:37457 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d90ca39299b0413eae21ca6c7f65bc16 2024-11-16T11:35:21,815 DEBUG [M:0;a7948fca2832:37457 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b8b101d8d30944209cb2cc2d74b193ee as hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b8b101d8d30944209cb2cc2d74b193ee 2024-11-16T11:35:21,822 INFO [M:0;a7948fca2832:37457 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b8b101d8d30944209cb2cc2d74b193ee, entries=8, sequenceid=29, filesize=5.5 K 2024-11-16T11:35:21,824 DEBUG 
[M:0;a7948fca2832:37457 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2c92ef224be14857b60719e93a3d5791 as hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2c92ef224be14857b60719e93a3d5791 2024-11-16T11:35:21,831 INFO [M:0;a7948fca2832:37457 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2c92ef224be14857b60719e93a3d5791, entries=3, sequenceid=29, filesize=5.2 K 2024-11-16T11:35:21,832 DEBUG [M:0;a7948fca2832:37457 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/019d829376004ca58eaba7974e9a29f1 as hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/019d829376004ca58eaba7974e9a29f1 2024-11-16T11:35:21,838 INFO [M:0;a7948fca2832:37457 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/019d829376004ca58eaba7974e9a29f1, entries=1, sequenceid=29, filesize=5.0 K 2024-11-16T11:35:21,840 DEBUG [M:0;a7948fca2832:37457 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d90ca39299b0413eae21ca6c7f65bc16 as hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d90ca39299b0413eae21ca6c7f65bc16 2024-11-16T11:35:21,846 INFO [M:0;a7948fca2832:37457 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39931/user/jenkins/test-data/5bba7ced-16b0-97cb-a83f-2e8b09009cd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d90ca39299b0413eae21ca6c7f65bc16, entries=1, sequenceid=29, filesize=4.9 K 2024-11-16T11:35:21,847 INFO [M:0;a7948fca2832:37457 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=29, compaction requested=false 2024-11-16T11:35:21,849 INFO [M:0;a7948fca2832:37457 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:35:21,849 DEBUG [M:0;a7948fca2832:37457 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731756921703Disabling compacts and flushes for region at 1731756921703Disabling writes for close at 1731756921704 (+1 ms)Obtaining lock to block concurrent updates at 1731756921704Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731756921704Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731756921704Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731756921705 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731756921705Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731756921719 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731756921719Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731756921732 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731756921747 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731756921747Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731756921759 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731756921774 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731756921774Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731756921787 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731756921802 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731756921802Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75b60c65: reopening flushed file at 1731756921814 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31e5a2d6: reopening flushed file at 1731756921823 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c794e47: reopening flushed file at 1731756921831 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6db43b7c: reopening flushed file at 1731756921838 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=29, compaction requested=false at 1731756921847 (+9 ms)Writing region close event to WAL at 1731756921849 (+2 ms)Closed at 1731756921849 2024-11-16T11:35:21,849 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,849 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,850 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,850 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,850 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:21,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35579 is added to blk_1073741830_1006 (size=10311) 2024-11-16T11:35:21,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37573 is added to blk_1073741830_1006 (size=10311) 2024-11-16T11:35:21,852 INFO [M:0;a7948fca2832:37457 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T11:35:21,852 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T11:35:21,853 INFO [M:0;a7948fca2832:37457 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37457 2024-11-16T11:35:21,853 INFO [M:0;a7948fca2832:37457 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T11:35:21,999 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:35:22,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:35:22,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:35:22,028 INFO [M:0;a7948fca2832:37457 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T11:35:22,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37457-0x101436d88320000, quorum=127.0.0.1:55918, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:35:22,030 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35bbb04a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:22,030 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13a73383{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:35:22,030 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:35:22,030 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22208119{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:35:22,031 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72380cd1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/hadoop.log.dir/,STOPPED} 2024-11-16T11:35:22,032 WARN [BP-1135396281-172.17.0.2-1731756917579 heartbeating to localhost/127.0.0.1:39931 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:35:22,032 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T11:35:22,032 WARN [BP-1135396281-172.17.0.2-1731756917579 heartbeating to localhost/127.0.0.1:39931 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1135396281-172.17.0.2-1731756917579 (Datanode Uuid 30c0e00d-9aeb-41ee-b836-71011c099ade) service to localhost/127.0.0.1:39931 2024-11-16T11:35:22,032 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:35:22,033 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/cluster_190becb6-4d50-3dc8-1519-b1a346502a64/data/data3/current/BP-1135396281-172.17.0.2-1731756917579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:22,033 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/cluster_190becb6-4d50-3dc8-1519-b1a346502a64/data/data4/current/BP-1135396281-172.17.0.2-1731756917579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:22,033 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:35:22,040 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6cc6305e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:22,041 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@50f2a115{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:35:22,041 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:35:22,041 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ba968b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:35:22,041 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dfbaab6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/hadoop.log.dir/,STOPPED} 2024-11-16T11:35:22,043 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T11:35:22,043 WARN [BP-1135396281-172.17.0.2-1731756917579 heartbeating to localhost/127.0.0.1:39931 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:35:22,043 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:35:22,043 WARN [BP-1135396281-172.17.0.2-1731756917579 heartbeating to localhost/127.0.0.1:39931 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1135396281-172.17.0.2-1731756917579 (Datanode Uuid d5d73470-e59e-4937-a603-7f664b1a68d0) service to localhost/127.0.0.1:39931 2024-11-16T11:35:22,044 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/cluster_190becb6-4d50-3dc8-1519-b1a346502a64/data/data1/current/BP-1135396281-172.17.0.2-1731756917579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:22,044 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/cluster_190becb6-4d50-3dc8-1519-b1a346502a64/data/data2/current/BP-1135396281-172.17.0.2-1731756917579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:22,044 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:35:22,049 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@68e16ee6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T11:35:22,050 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65d95c6f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:35:22,050 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:35:22,050 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3839647f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:35:22,050 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@766cfc4e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/hadoop.log.dir/,STOPPED} 2024-11-16T11:35:22,058 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T11:35:22,084 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T11:35:22,084 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T11:35:22,084 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/hadoop.log.dir so I do NOT create it in target/test-data/987d1844-857e-fb23-a6fd-139d0983e326 2024-11-16T11:35:22,084 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/41cec8a7-ea78-2644-72fe-1109ac2b6278/hadoop.tmp.dir so I do NOT create it in target/test-data/987d1844-857e-fb23-a6fd-139d0983e326 2024-11-16T11:35:22,084 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59, deleteOnExit=true 2024-11-16T11:35:22,084 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T11:35:22,085 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/test.cache.data in system properties and HBase conf 2024-11-16T11:35:22,085 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T11:35:22,085 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir in system properties and HBase conf 2024-11-16T11:35:22,085 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T11:35:22,085 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T11:35:22,085 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T11:35:22,085 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T11:35:22,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T11:35:22,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T11:35:22,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T11:35:22,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T11:35:22,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T11:35:22,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T11:35:22,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T11:35:22,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T11:35:22,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T11:35:22,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/nfs.dump.dir in system properties and HBase conf 2024-11-16T11:35:22,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/java.io.tmpdir in system properties and HBase conf 2024-11-16T11:35:22,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T11:35:22,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T11:35:22,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T11:35:22,106 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T11:35:22,452 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-16T11:35:22,454 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:35:22,468 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:35:22,470 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:35:22,471 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:35:22,492 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:35:22,497 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:35:22,498 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:35:22,498 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:35:22,498 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:35:22,501 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:35:22,501 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ed0b53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:35:22,502 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@82e7b75{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:35:22,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b1223a8{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/java.io.tmpdir/jetty-localhost-43725-hadoop-hdfs-3_4_1-tests_jar-_-any-2254541840424573905/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T11:35:22,597 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b8a0b06{HTTP/1.1, (http/1.1)}{localhost:43725} 2024-11-16T11:35:22,597 INFO [Time-limited test {}] server.Server(415): Started @107819ms 2024-11-16T11:35:22,608 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T11:35:22,643 INFO [regionserver/a7948fca2832:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T11:35:22,890 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:35:22,894 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:35:22,895 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:35:22,895 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:35:22,895 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T11:35:22,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7150e922{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:35:22,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29d3f1c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:35:22,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@50936d12{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/java.io.tmpdir/jetty-localhost-42813-hadoop-hdfs-3_4_1-tests_jar-_-any-10737987654964517201/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:22,990 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@59304df5{HTTP/1.1, (http/1.1)}{localhost:42813} 2024-11-16T11:35:22,990 INFO [Time-limited test {}] server.Server(415): Started @108212ms 2024-11-16T11:35:22,992 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:35:23,019 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:35:23,022 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:35:23,023 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:35:23,023 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:35:23,023 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T11:35:23,024 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37edf9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:35:23,024 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@129c308c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:35:23,120 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78b7f97a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/java.io.tmpdir/jetty-localhost-40941-hadoop-hdfs-3_4_1-tests_jar-_-any-17831967285219946802/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:23,120 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e4b1502{HTTP/1.1, (http/1.1)}{localhost:40941} 2024-11-16T11:35:23,120 INFO [Time-limited test {}] server.Server(415): Started @108342ms 2024-11-16T11:35:23,121 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:35:24,293 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data1/current/BP-222813829-172.17.0.2-1731756922121/current, will proceed with Du for space computation calculation, 2024-11-16T11:35:24,293 WARN [Thread-674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data2/current/BP-222813829-172.17.0.2-1731756922121/current, will proceed with Du for space computation calculation, 2024-11-16T11:35:24,314 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:35:24,317 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9f9271d82d634a1e with lease ID 0x839e6fff83800219: Processing first storage report for DS-713c46c1-8c9c-4a94-9576-bd0b484304aa from datanode DatanodeRegistration(127.0.0.1:43447, datanodeUuid=69afaec0-a137-45bb-899a-2d2b75843e0b, infoPort=42499, infoSecurePort=0, ipcPort=44953, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121) 2024-11-16T11:35:24,317 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9f9271d82d634a1e with lease ID 0x839e6fff83800219: from storage DS-713c46c1-8c9c-4a94-9576-bd0b484304aa node DatanodeRegistration(127.0.0.1:43447, datanodeUuid=69afaec0-a137-45bb-899a-2d2b75843e0b, infoPort=42499, infoSecurePort=0, ipcPort=44953, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:35:24,317 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9f9271d82d634a1e with lease ID 0x839e6fff83800219: Processing first storage report for DS-a909c50a-6c3a-4771-bbba-24bc82c00cf6 from datanode DatanodeRegistration(127.0.0.1:43447, datanodeUuid=69afaec0-a137-45bb-899a-2d2b75843e0b, infoPort=42499, infoSecurePort=0, ipcPort=44953, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121) 2024-11-16T11:35:24,317 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9f9271d82d634a1e with lease ID 0x839e6fff83800219: from storage DS-a909c50a-6c3a-4771-bbba-24bc82c00cf6 node DatanodeRegistration(127.0.0.1:43447, datanodeUuid=69afaec0-a137-45bb-899a-2d2b75843e0b, infoPort=42499, infoSecurePort=0, ipcPort=44953, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:35:24,436 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data3/current/BP-222813829-172.17.0.2-1731756922121/current, will proceed with Du for space computation calculation, 2024-11-16T11:35:24,436 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data4/current/BP-222813829-172.17.0.2-1731756922121/current, will proceed with Du for space computation calculation, 2024-11-16T11:35:24,455 WARN [Thread-660 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:35:24,458 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x74196424046d0be1 with lease ID 0x839e6fff8380021a: Processing first storage report for DS-590e89d7-197c-4113-851d-bcf056491e78 from datanode DatanodeRegistration(127.0.0.1:42905, datanodeUuid=737f46b5-4f75-46d5-bb6d-e4740cb99e10, infoPort=46733, infoSecurePort=0, ipcPort=36575, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121) 2024-11-16T11:35:24,458 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x74196424046d0be1 with lease ID 0x839e6fff8380021a: from storage DS-590e89d7-197c-4113-851d-bcf056491e78 node DatanodeRegistration(127.0.0.1:42905, datanodeUuid=737f46b5-4f75-46d5-bb6d-e4740cb99e10, infoPort=46733, infoSecurePort=0, ipcPort=36575, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:35:24,458 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x74196424046d0be1 with lease ID 0x839e6fff8380021a: Processing first storage report for DS-13ead2df-b1b4-4f4b-aa7b-9efef1ff3a5a from datanode DatanodeRegistration(127.0.0.1:42905, datanodeUuid=737f46b5-4f75-46d5-bb6d-e4740cb99e10, infoPort=46733, infoSecurePort=0, ipcPort=36575, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121) 2024-11-16T11:35:24,458 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x74196424046d0be1 with lease ID 0x839e6fff8380021a: from storage DS-13ead2df-b1b4-4f4b-aa7b-9efef1ff3a5a node DatanodeRegistration(127.0.0.1:42905, datanodeUuid=737f46b5-4f75-46d5-bb6d-e4740cb99e10, infoPort=46733, infoSecurePort=0, ipcPort=36575, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:35:24,558 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326 2024-11-16T11:35:24,562 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/zookeeper_0, clientPort=52242, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T11:35:24,563 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52242 2024-11-16T11:35:24,563 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:24,565 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:24,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43447 is added to blk_1073741825_1001 (size=7) 2024-11-16T11:35:24,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42905 is added to blk_1073741825_1001 (size=7) 2024-11-16T11:35:24,579 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b with version=8 2024-11-16T11:35:24,579 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/hbase-staging 2024-11-16T11:35:24,581 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:35:24,582 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:24,582 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:24,582 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:35:24,582 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:24,582 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:35:24,582 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T11:35:24,582 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:35:24,583 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42791 2024-11-16T11:35:24,585 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42791 connecting to ZooKeeper ensemble=127.0.0.1:52242 2024-11-16T11:35:24,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:427910x0, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T11:35:24,660 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42791-0x101436d9a3c0000 connected 2024-11-16T11:35:24,747 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:24,749 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:24,752 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:35:24,753 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b, hbase.cluster.distributed=false 2024-11-16T11:35:24,755 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:35:24,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42791 2024-11-16T11:35:24,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42791 2024-11-16T11:35:24,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42791 2024-11-16T11:35:24,757 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42791 2024-11-16T11:35:24,757 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42791 2024-11-16T11:35:24,773 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:35:24,773 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:24,774 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:24,774 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:35:24,774 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:24,774 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:35:24,774 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T11:35:24,774 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:35:24,775 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40843 2024-11-16T11:35:24,776 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40843 connecting to ZooKeeper ensemble=127.0.0.1:52242 2024-11-16T11:35:24,776 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:24,778 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:24,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:408430x0, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T11:35:24,790 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40843-0x101436d9a3c0001 connected 2024-11-16T11:35:24,790 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:35:24,790 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T11:35:24,791 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T11:35:24,791 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T11:35:24,793 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:35:24,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40843 2024-11-16T11:35:24,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40843 2024-11-16T11:35:24,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40843 2024-11-16T11:35:24,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40843 2024-11-16T11:35:24,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40843 2024-11-16T11:35:24,809 DEBUG [M:0;a7948fca2832:42791 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a7948fca2832:42791 2024-11-16T11:35:24,810 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a7948fca2832,42791,1731756924581 2024-11-16T11:35:24,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:35:24,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:35:24,822 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a7948fca2832,42791,1731756924581 2024-11-16T11:35:24,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T11:35:24,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:24,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:24,832 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T11:35:24,833 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a7948fca2832,42791,1731756924581 from backup master directory 2024-11-16T11:35:24,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a7948fca2832,42791,1731756924581 2024-11-16T11:35:24,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:35:24,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:35:24,843 WARN [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T11:35:24,843 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a7948fca2832,42791,1731756924581
2024-11-16T11:35:24,850 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/hbase.id] with ID: b4ba8665-0541-415c-b92a-c0a212a14880
2024-11-16T11:35:24,850 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/.tmp/hbase.id
2024-11-16T11:35:24,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43447 is added to blk_1073741826_1002 (size=42)
2024-11-16T11:35:24,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42905 is added to blk_1073741826_1002 (size=42)
2024-11-16T11:35:24,858 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/.tmp/hbase.id]:[hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/hbase.id]
2024-11-16T11:35:24,871 INFO [master/a7948fca2832:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-16T11:35:24,871 INFO [master/a7948fca2832:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-16T11:35:24,873 INFO [master/a7948fca2832:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms.
2024-11-16T11:35:24,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:24,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:24,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43447 is added to blk_1073741827_1003 (size=196) 2024-11-16T11:35:24,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42905 is added to blk_1073741827_1003 (size=196) 2024-11-16T11:35:24,892 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T11:35:24,893 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T11:35:24,893 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:35:24,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42905 is added to blk_1073741828_1004 (size=1189) 2024-11-16T11:35:24,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43447 is added to blk_1073741828_1004 (size=1189) 2024-11-16T11:35:24,904 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store 2024-11-16T11:35:24,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43447 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:35:24,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42905 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:35:24,912 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:35:24,912 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T11:35:24,912 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:35:24,912 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:35:24,912 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T11:35:24,912 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:35:24,912 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T11:35:24,913 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731756924912Disabling compacts and flushes for region at 1731756924912Disabling writes for close at 1731756924912Writing region close event to WAL at 1731756924912Closed at 1731756924912 2024-11-16T11:35:24,913 WARN [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/.initializing 2024-11-16T11:35:24,914 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/WALs/a7948fca2832,42791,1731756924581 2024-11-16T11:35:24,917 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C42791%2C1731756924581, suffix=, logDir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/WALs/a7948fca2832,42791,1731756924581, archiveDir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/oldWALs, maxLogs=10 2024-11-16T11:35:24,917 INFO [master/a7948fca2832:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C42791%2C1731756924581.1731756924917 2024-11-16T11:35:24,924 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/WALs/a7948fca2832,42791,1731756924581/a7948fca2832%2C42791%2C1731756924581.1731756924917 2024-11-16T11:35:24,925 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42499:42499),(127.0.0.1/127.0.0.1:46733:46733)] 2024-11-16T11:35:24,926 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:35:24,926 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:35:24,927 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:24,927 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:24,928 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:24,930 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T11:35:24,930 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:24,931 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:24,931 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:24,932 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T11:35:24,932 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:24,933 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:35:24,933 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:24,934 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T11:35:24,934 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:24,935 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:35:24,935 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:24,937 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T11:35:24,937 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:24,937 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:35:24,937 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:24,939 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:24,939 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:24,941 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:24,941 DEBUG [master/a7948fca2832:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:24,942 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T11:35:24,944 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:35:24,947 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:35:24,948 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849542, jitterRate=0.08024871349334717}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T11:35:24,949 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731756924927Initializing all the Stores at 1731756924928 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756924928Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756924928Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756924928Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756924928Cleaning up temporary data from old regions at 1731756924941 (+13 ms)Region opened successfully at 1731756924949 (+8 ms) 2024-11-16T11:35:24,950 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T11:35:24,954 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ec548f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:35:24,955 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T11:35:24,955 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T11:35:24,955 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T11:35:24,955 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T11:35:24,956 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T11:35:24,956 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T11:35:24,956 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T11:35:24,958 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T11:35:24,959 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T11:35:24,968 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T11:35:24,969 INFO [master/a7948fca2832:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T11:35:24,970 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T11:35:24,979 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T11:35:24,979 INFO [master/a7948fca2832:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T11:35:24,980 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T11:35:24,989 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T11:35:24,991 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T11:35:25,000 DEBUG 
[master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T11:35:25,005 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T11:35:25,018 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T11:35:25,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T11:35:25,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T11:35:25,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:25,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:25,034 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a7948fca2832,42791,1731756924581, sessionid=0x101436d9a3c0000, setting cluster-up flag (Was=false) 2024-11-16T11:35:25,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:25,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:25,084 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T11:35:25,087 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7948fca2832,42791,1731756924581 2024-11-16T11:35:25,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:25,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:25,137 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T11:35:25,139 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7948fca2832,42791,1731756924581 2024-11-16T11:35:25,141 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T11:35:25,144 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T11:35:25,144 INFO [master/a7948fca2832:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T11:35:25,144 INFO [master/a7948fca2832:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T11:35:25,145 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a7948fca2832,42791,1731756924581 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T11:35:25,147 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:35:25,148 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:35:25,148 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:35:25,148 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:35:25,148 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a7948fca2832:0, corePoolSize=10, maxPoolSize=10 2024-11-16T11:35:25,148 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:25,148 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:35:25,148 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a7948fca2832:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T11:35:25,149 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731756955149 2024-11-16T11:35:25,149 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T11:35:25,149 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T11:35:25,149 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T11:35:25,149 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T11:35:25,149 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T11:35:25,149 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T11:35:25,150 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:25,150 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T11:35:25,150 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T11:35:25,150 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T11:35:25,150 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:35:25,150 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T11:35:25,150 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T11:35:25,150 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T11:35:25,151 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731756925151,5,FailOnTimeoutGroup] 2024-11-16T11:35:25,151 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731756925151,5,FailOnTimeoutGroup] 2024-11-16T11:35:25,151 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:25,151 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T11:35:25,151 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:25,151 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:25,152 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:25,152 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T11:35:25,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42905 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:35:25,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43447 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:35:25,196 INFO [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(746): ClusterId : b4ba8665-0541-415c-b92a-c0a212a14880 2024-11-16T11:35:25,197 DEBUG [RS:0;a7948fca2832:40843 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T11:35:25,208 DEBUG [RS:0;a7948fca2832:40843 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T11:35:25,208 DEBUG [RS:0;a7948fca2832:40843 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T11:35:25,222 DEBUG [RS:0;a7948fca2832:40843 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T11:35:25,222 DEBUG [RS:0;a7948fca2832:40843 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@675b9aaf, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:35:25,234 DEBUG [RS:0;a7948fca2832:40843 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a7948fca2832:40843 2024-11-16T11:35:25,234 INFO [RS:0;a7948fca2832:40843 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T11:35:25,234 INFO [RS:0;a7948fca2832:40843 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T11:35:25,234 DEBUG [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T11:35:25,235 INFO [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7948fca2832,42791,1731756924581 with port=40843, startcode=1731756924773 2024-11-16T11:35:25,235 DEBUG [RS:0;a7948fca2832:40843 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T11:35:25,237 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33425, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T11:35:25,238 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42791 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7948fca2832,40843,1731756924773 2024-11-16T11:35:25,238 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42791 {}] master.ServerManager(517): Registering regionserver=a7948fca2832,40843,1731756924773 2024-11-16T11:35:25,240 DEBUG [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b 2024-11-16T11:35:25,240 DEBUG [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39669 2024-11-16T11:35:25,240 DEBUG [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T11:35:25,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:35:25,253 DEBUG [RS:0;a7948fca2832:40843 {}] zookeeper.ZKUtil(111): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7948fca2832,40843,1731756924773 2024-11-16T11:35:25,253 WARN [RS:0;a7948fca2832:40843 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
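The HBASE_ZNODE_FILE warning that closes the record above means the region server has nowhere to record its ephemeral znode path, so the start scripts cannot clear that znode after a crash and mean time to recovery grows. A minimal sketch of the environment check implied by that message, purely illustrative and not the actual org.apache.hadoop.hbase.ZNodeClearer code:

```java
// Illustrative sketch of the check behind the HBASE_ZNODE_FILE warning above.
// Not the real ZNodeClearer implementation; it only mirrors the logged behaviour.
public final class ZnodeFileCheck {
  public static void main(String[] args) {
    String znodeFile = System.getenv("HBASE_ZNODE_FILE");
    if (znodeFile == null || znodeFile.isEmpty()) {
      System.err.println("HBASE_ZNODE_FILE not set; znode will not be cleared on crash (longer MTTR)");
    } else {
      System.out.println("znode path would be recorded in: " + znodeFile);
    }
  }
}
```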
2024-11-16T11:35:25,253 INFO [RS:0;a7948fca2832:40843 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:35:25,253 DEBUG [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773 2024-11-16T11:35:25,254 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7948fca2832,40843,1731756924773] 2024-11-16T11:35:25,258 INFO [RS:0;a7948fca2832:40843 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T11:35:25,261 INFO [RS:0;a7948fca2832:40843 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T11:35:25,261 INFO [RS:0;a7948fca2832:40843 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T11:35:25,261 INFO [RS:0;a7948fca2832:40843 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:25,262 INFO [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T11:35:25,263 INFO [RS:0;a7948fca2832:40843 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T11:35:25,263 INFO [RS:0;a7948fca2832:40843 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:25,263 DEBUG [RS:0;a7948fca2832:40843 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:25,263 DEBUG [RS:0;a7948fca2832:40843 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:25,263 DEBUG [RS:0;a7948fca2832:40843 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:25,264 DEBUG [RS:0;a7948fca2832:40843 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:25,264 DEBUG [RS:0;a7948fca2832:40843 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:25,264 DEBUG [RS:0;a7948fca2832:40843 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:35:25,264 DEBUG [RS:0;a7948fca2832:40843 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:25,264 DEBUG [RS:0;a7948fca2832:40843 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:25,264 DEBUG [RS:0;a7948fca2832:40843 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7948fca2832:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T11:35:25,264 DEBUG [RS:0;a7948fca2832:40843 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:25,264 DEBUG [RS:0;a7948fca2832:40843 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:25,264 DEBUG [RS:0;a7948fca2832:40843 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:25,264 DEBUG [RS:0;a7948fca2832:40843 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:35:25,265 DEBUG [RS:0;a7948fca2832:40843 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:35:25,268 INFO [RS:0;a7948fca2832:40843 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:25,268 INFO [RS:0;a7948fca2832:40843 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:25,268 INFO [RS:0;a7948fca2832:40843 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:25,269 INFO [RS:0;a7948fca2832:40843 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:25,269 INFO [RS:0;a7948fca2832:40843 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:25,269 INFO [RS:0;a7948fca2832:40843 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,40843,1731756924773-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T11:35:25,293 INFO [RS:0;a7948fca2832:40843 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T11:35:25,293 INFO [RS:0;a7948fca2832:40843 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,40843,1731756924773-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:25,293 INFO [RS:0;a7948fca2832:40843 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:25,293 INFO [RS:0;a7948fca2832:40843 {}] regionserver.Replication(171): a7948fca2832,40843,1731756924773 started 2024-11-16T11:35:25,314 INFO [RS:0;a7948fca2832:40843 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
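The ScheduledChore lines above each pair a chore name with a period and unit (CompactionChecker every 1000 ms, MemstoreFlusherChore every 1000 ms, nonceCleaner every 360000 ms, and so on). As a loose analogy only, and not HBase's actual ChoreService API, fixed-period scheduling of that shape looks like this in plain java.util.concurrent:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Analogy only: run a named task at a fixed period, the way the ChoreService lines
// above pair each chore with period/unit. Not HBase's ChoreService API.
public final class ChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
    Runnable compactionChecker = () -> System.out.println("check stores for compaction");
    // CompactionChecker: period=1000, unit=MILLISECONDS (as logged above)
    pool.scheduleAtFixedRate(compactionChecker, 0, 1000, TimeUnit.MILLISECONDS);
    // stop the demo pool after a few runs
    pool.schedule(pool::shutdown, 5, TimeUnit.SECONDS);
  }
}
```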
2024-11-16T11:35:25,314 INFO [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(1482): Serving as a7948fca2832,40843,1731756924773, RpcServer on a7948fca2832/172.17.0.2:40843, sessionid=0x101436d9a3c0001 2024-11-16T11:35:25,314 DEBUG [RS:0;a7948fca2832:40843 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T11:35:25,314 DEBUG [RS:0;a7948fca2832:40843 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7948fca2832,40843,1731756924773 2024-11-16T11:35:25,314 DEBUG [RS:0;a7948fca2832:40843 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,40843,1731756924773' 2024-11-16T11:35:25,314 DEBUG [RS:0;a7948fca2832:40843 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T11:35:25,315 DEBUG [RS:0;a7948fca2832:40843 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T11:35:25,315 DEBUG [RS:0;a7948fca2832:40843 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T11:35:25,315 DEBUG [RS:0;a7948fca2832:40843 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T11:35:25,315 DEBUG [RS:0;a7948fca2832:40843 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a7948fca2832,40843,1731756924773 2024-11-16T11:35:25,315 DEBUG [RS:0;a7948fca2832:40843 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,40843,1731756924773' 2024-11-16T11:35:25,315 DEBUG [RS:0;a7948fca2832:40843 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T11:35:25,316 DEBUG [RS:0;a7948fca2832:40843 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T11:35:25,316 DEBUG [RS:0;a7948fca2832:40843 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T11:35:25,316 INFO [RS:0;a7948fca2832:40843 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T11:35:25,316 INFO [RS:0;a7948fca2832:40843 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
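The two "Quota support disabled" records above reflect the default configuration: neither the RPC nor the space quota manager starts unless quotas are switched on in hbase-site.xml. A minimal configuration sketch, assuming the property name is hbase.quota.enabled (quoted from memory, so treat it as an assumption):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Minimal sketch for the "Quota support disabled" lines above.
// The property name "hbase.quota.enabled" is from memory; verify before relying on it.
public final class QuotaToggleSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    boolean enabled = conf.getBoolean("hbase.quota.enabled", false); // defaults to off, as logged
    System.out.println("quota support enabled? " + enabled);
  }
}
```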
2024-11-16T11:35:25,418 INFO [RS:0;a7948fca2832:40843 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C40843%2C1731756924773, suffix=, logDir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773, archiveDir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/oldWALs, maxLogs=32 2024-11-16T11:35:25,419 INFO [RS:0;a7948fca2832:40843 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C40843%2C1731756924773.1731756925419 2024-11-16T11:35:25,426 INFO [RS:0;a7948fca2832:40843 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 2024-11-16T11:35:25,428 DEBUG [RS:0;a7948fca2832:40843 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46733:46733),(127.0.0.1/127.0.0.1:42499:42499)] 2024-11-16T11:35:25,562 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T11:35:25,563 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b 2024-11-16T11:35:25,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43447 is added to blk_1073741833_1009 (size=32) 2024-11-16T11:35:25,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42905 is added to blk_1073741833_1009 (size=32) 2024-11-16T11:35:25,575 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:35:25,577 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T11:35:25,580 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T11:35:25,580 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:25,580 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:25,581 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T11:35:25,583 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T11:35:25,583 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:25,584 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:25,584 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T11:35:25,586 
INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T11:35:25,586 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:25,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:25,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T11:35:25,589 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T11:35:25,589 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:25,589 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:25,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T11:35:25,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740 2024-11-16T11:35:25,591 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740 2024-11-16T11:35:25,592 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T11:35:25,592 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 
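Each CompactionConfiguration record above repeats the same selection parameters for a column family of 1588230740 (minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0). The ratio is what stops a minor compaction from pairing one huge file with a few tiny ones. A simplified paraphrase of that check, not the actual ExploringCompactionPolicy code:

```java
import java.util.List;

// Simplified, hedged illustration of the "ratio 1.200000" figure logged above:
// a candidate set of store files is (roughly) acceptable when every file is no
// larger than ratio * (sum of the other files). Paraphrase only, not HBase code.
public final class RatioCheckSketch {
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(filesInRatio(List.of(10L, 12L, 11L), 1.2)); // true: similar sizes
    System.out.println(filesInRatio(List.of(100L, 5L, 5L), 1.2));  // false: one file dwarfs the rest
  }
}
```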
2024-11-16T11:35:25,593 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T11:35:25,594 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T11:35:25,596 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:35:25,597 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710085, jitterRate=-0.09708032011985779}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T11:35:25,597 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731756925575Initializing all the Stores at 1731756925576 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756925577 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756925577Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756925577Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756925577Cleaning up temporary data from old regions at 1731756925592 (+15 ms)Region opened successfully at 1731756925597 (+5 ms) 2024-11-16T11:35:25,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T11:35:25,598 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T11:35:25,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T11:35:25,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T11:35:25,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T11:35:25,598 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 
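The "Opened 1588230740" record above reports desiredMaxFileSize=710085 with jitterRate=-0.09708032..., and the earlier master-store record reported 849542 with jitterRate=0.08024871.... Both values are consistent with a base maximum file size of 786432 bytes (768 KB) plus a jitter term, assuming the formula desired = base + (long)(base * jitterRate); the base itself is inferred, not logged directly. A quick arithmetic check under that assumption:

```java
// Arithmetic check of the jittered split sizes logged above, under the assumed formula
// desired = base + (long)(base * jitterRate). Both logged values are reproduced from a
// 786432-byte base, suggesting that is the configured max file size in this test.
public final class SplitJitterCheck {
  public static void main(String[] args) {
    long base = 786_432L;                       // 768 KB (inferred)
    double metaJitter = -0.09708032011985779;   // from the hbase:meta record above
    double masterJitter = 0.08024871349334717;  // from the earlier master store record
    System.out.println(base + (long) (base * metaJitter));   // 710085
    System.out.println(base + (long) (base * masterJitter)); // 849542
  }
}
```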
2024-11-16T11:35:25,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731756925598Disabling compacts and flushes for region at 1731756925598Disabling writes for close at 1731756925598Writing region close event to WAL at 1731756925598Closed at 1731756925598 2024-11-16T11:35:25,600 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:35:25,600 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T11:35:25,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T11:35:25,601 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T11:35:25,602 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T11:35:25,753 DEBUG [a7948fca2832:42791 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T11:35:25,754 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a7948fca2832,40843,1731756924773 2024-11-16T11:35:25,757 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7948fca2832,40843,1731756924773, state=OPENING 2024-11-16T11:35:25,810 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T11:35:25,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:25,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:35:25,822 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T11:35:25,822 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:35:25,822 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a7948fca2832,40843,1731756924773}] 2024-11-16T11:35:25,822 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:35:25,976 DEBUG [RSProcedureDispatcher-pool-0 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T11:35:25,979 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40955, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T11:35:25,983 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T11:35:25,983 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:35:25,985 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C40843%2C1731756924773.meta, suffix=.meta, logDir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773, archiveDir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/oldWALs, maxLogs=32 2024-11-16T11:35:25,986 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta 2024-11-16T11:35:26,001 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta 2024-11-16T11:35:26,008 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42499:42499),(127.0.0.1/127.0.0.1:46733:46733)] 2024-11-16T11:35:26,011 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:35:26,011 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T11:35:26,011 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T11:35:26,012 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
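The coprocessor records above load MultiRowMutationEndpoint with priority 536870911. That value is Integer.MAX_VALUE / 4, which matches what I recall as HBase's system coprocessor priority constant (treat the constant's name as an assumption). A one-line check:

```java
// 536870911 == Integer.MAX_VALUE / 4, matching the priority logged above for
// MultiRowMutationEndpoint (believed to be the system coprocessor priority).
public final class CoprocessorPriorityCheck {
  public static void main(String[] args) {
    System.out.println(Integer.MAX_VALUE / 4); // prints 536870911
  }
}
```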
2024-11-16T11:35:26,012 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T11:35:26,012 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:35:26,012 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T11:35:26,012 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T11:35:26,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T11:35:26,015 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T11:35:26,015 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:26,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:26,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T11:35:26,016 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T11:35:26,016 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:26,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:26,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T11:35:26,017 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T11:35:26,018 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:26,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:35:26,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T11:35:26,019 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T11:35:26,019 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:26,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
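The store records above show hbase:meta's families (info, ns, rep_barrier, table) opened with ROW_INDEX_V1 block encoding, ROWCOL bloom filters, and in-memory caching, matching the table descriptor printed earlier. A hedged sketch of building one such family descriptor with the public client API; the builder method names are from memory, so verify them against the HBase version in use:

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a column family descriptor mirroring the attributes logged for the 'info'
// family of hbase:meta (VERSIONS=3, ROW_INDEX_V1, ROWCOL bloom, in-memory, 8 KB blocks).
// Method names are recalled, not verified against this exact 3.0.0-beta-2-SNAPSHOT build.
public final class MetaInfoFamilySketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8192)
        .build();
    System.out.println(info);
  }
}
```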
2024-11-16T11:35:26,020 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T11:35:26,021 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740 2024-11-16T11:35:26,022 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740 2024-11-16T11:35:26,024 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T11:35:26,024 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T11:35:26,024 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T11:35:26,025 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T11:35:26,026 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778958, jitterRate=-0.009504944086074829}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T11:35:26,026 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T11:35:26,027 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731756926012Writing region info on filesystem at 1731756926012Initializing all the Stores at 1731756926013 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756926013Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756926013Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756926013Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756926014 (+1 ms)Cleaning up temporary data from old regions at 1731756926024 (+10 ms)Running coprocessor post-open hooks at 1731756926026 (+2 ms)Region opened successfully at 1731756926027 (+1 ms) 2024-11-16T11:35:26,028 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731756925976 2024-11-16T11:35:26,031 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T11:35:26,031 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T11:35:26,032 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a7948fca2832,40843,1731756924773 2024-11-16T11:35:26,033 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7948fca2832,40843,1731756924773, state=OPEN 2024-11-16T11:35:26,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T11:35:26,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T11:35:26,075 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a7948fca2832,40843,1731756924773 2024-11-16T11:35:26,075 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:35:26,075 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:35:26,079 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T11:35:26,079 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a7948fca2832,40843,1731756924773 in 253 msec 2024-11-16T11:35:26,082 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T11:35:26,082 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 479 msec 2024-11-16T11:35:26,083 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:35:26,083 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T11:35:26,084 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T11:35:26,084 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7948fca2832,40843,1731756924773, seqNum=-1] 2024-11-16T11:35:26,085 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T11:35:26,086 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41817, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T11:35:26,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 949 msec 2024-11-16T11:35:26,092 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731756926092, completionTime=-1 2024-11-16T11:35:26,092 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T11:35:26,092 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T11:35:26,094 INFO [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T11:35:26,094 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731756986094 2024-11-16T11:35:26,094 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731757046094 2024-11-16T11:35:26,095 INFO [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T11:35:26,095 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,42791,1731756924581-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:26,095 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,42791,1731756924581-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:26,095 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,42791,1731756924581-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:26,095 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a7948fca2832:42791, period=300000, unit=MILLISECONDS is enabled. 
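
InitMetaProcedure finishes by creating the two built-in namespaces, 'default' and 'hbase'. A small client-side check, assuming a reachable cluster configured via the classpath hbase-site.xml, would list exactly those two on a freshly initialized cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespaces {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // On a fresh cluster this prints the two namespaces
                // InitMetaProcedure just created: "default" and "hbase".
                for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                    System.out.println(ns.getName());
                }
            }
        }
    }
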
2024-11-16T11:35:26,095 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:26,095 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:26,097 DEBUG [master/a7948fca2832:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T11:35:26,099 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.256sec 2024-11-16T11:35:26,099 INFO [master/a7948fca2832:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T11:35:26,099 INFO [master/a7948fca2832:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T11:35:26,099 INFO [master/a7948fca2832:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T11:35:26,100 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T11:35:26,100 INFO [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T11:35:26,100 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,42791,1731756924581-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T11:35:26,100 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,42791,1731756924581-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T11:35:26,102 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T11:35:26,103 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T11:35:26,103 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,42791,1731756924581-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
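
Each "Chore ScheduledChore name=..., period=..., unit=... is enabled" line above is a periodic background task scheduled by the master's ChoreService. As a rough analogy only (plain JDK scheduling, not HBase's ScheduledChore implementation), the period/unit pair maps onto a fixed-rate schedule like this:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalogy {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
            // Equivalent of "period=60000, unit=MILLISECONDS": run the task once a minute.
            pool.scheduleAtFixedRate(
                    () -> System.out.println("BalancerChore-like tick"),
                    0, 60_000, TimeUnit.MILLISECONDS);
            TimeUnit.SECONDS.sleep(5);   // let the initial run fire, then shut down
            pool.shutdownNow();
        }
    }
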
2024-11-16T11:35:26,198 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53c61fd1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:35:26,198 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a7948fca2832,42791,-1 for getting cluster id 2024-11-16T11:35:26,198 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T11:35:26,201 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b4ba8665-0541-415c-b92a-c0a212a14880' 2024-11-16T11:35:26,202 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T11:35:26,202 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b4ba8665-0541-415c-b92a-c0a212a14880" 2024-11-16T11:35:26,203 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4429846e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:35:26,203 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a7948fca2832,42791,-1] 2024-11-16T11:35:26,203 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T11:35:26,204 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:35:26,206 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41006, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T11:35:26,207 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63e7f9e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:35:26,208 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T11:35:26,209 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7948fca2832,40843,1731756924773, seqNum=-1] 2024-11-16T11:35:26,209 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T11:35:26,210 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33890, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T11:35:26,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a7948fca2832,42791,1731756924581 2024-11-16T11:35:26,212 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:26,215 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T11:35:26,230 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:35:26,230 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:26,230 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:26,230 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:35:26,230 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:35:26,230 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:35:26,230 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T11:35:26,230 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:35:26,231 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46401 2024-11-16T11:35:26,232 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46401 connecting to ZooKeeper ensemble=127.0.0.1:52242 2024-11-16T11:35:26,233 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:26,234 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:35:26,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:464010x0, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T11:35:26,253 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46401-0x101436d9a3c0002 connected 2024-11-16T11:35:26,253 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:46401-0x101436d9a3c0002, quorum=127.0.0.1:52242, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-16T11:35:26,253 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-16T11:35:26,254 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T11:35:26,255 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
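
The "Start fetching meta region location from registry" / "The fetched meta region location is ..." pair earlier in this block is the client resolving which server currently hosts hbase:meta. The same lookup can be done explicitly through the public client API; a minimal sketch, assuming the cluster from this log is reachable from the classpath configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class WhereIsMeta {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
                // Resolves the same thing the log calls the "fetched meta region location":
                // the region server hosting hbase:meta,,1.1588230740.
                HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
                System.out.println("hbase:meta is hosted on " + loc.getServerName());
            }
        }
    }
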
2024-11-16T11:35:26,256 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:46401-0x101436d9a3c0002, quorum=127.0.0.1:52242, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T11:35:26,258 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46401-0x101436d9a3c0002, quorum=127.0.0.1:52242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:35:26,259 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46401 2024-11-16T11:35:26,259 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46401 2024-11-16T11:35:26,259 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46401 2024-11-16T11:35:26,260 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46401 2024-11-16T11:35:26,260 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46401 2024-11-16T11:35:26,263 INFO [RS:1;a7948fca2832:46401 {}] regionserver.HRegionServer(746): ClusterId : b4ba8665-0541-415c-b92a-c0a212a14880 2024-11-16T11:35:26,263 DEBUG [RS:1;a7948fca2832:46401 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T11:35:26,275 DEBUG [RS:1;a7948fca2832:46401 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T11:35:26,275 DEBUG [RS:1;a7948fca2832:46401 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T11:35:26,285 DEBUG [RS:1;a7948fca2832:46401 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T11:35:26,286 DEBUG [RS:1;a7948fca2832:46401 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@146d41d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:35:26,303 DEBUG [RS:1;a7948fca2832:46401 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;a7948fca2832:46401 2024-11-16T11:35:26,303 INFO [RS:1;a7948fca2832:46401 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T11:35:26,303 INFO [RS:1;a7948fca2832:46401 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T11:35:26,304 DEBUG [RS:1;a7948fca2832:46401 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T11:35:26,304 INFO [RS:1;a7948fca2832:46401 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7948fca2832,42791,1731756924581 with port=46401, startcode=1731756926229 2024-11-16T11:35:26,304 DEBUG [RS:1;a7948fca2832:46401 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T11:35:26,306 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36795, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T11:35:26,306 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42791 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7948fca2832,46401,1731756926229 2024-11-16T11:35:26,306 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42791 {}] master.ServerManager(517): Registering regionserver=a7948fca2832,46401,1731756926229 2024-11-16T11:35:26,308 DEBUG [RS:1;a7948fca2832:46401 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b 2024-11-16T11:35:26,308 DEBUG [RS:1;a7948fca2832:46401 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39669 2024-11-16T11:35:26,308 DEBUG [RS:1;a7948fca2832:46401 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T11:35:26,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:35:26,316 DEBUG [RS:1;a7948fca2832:46401 {}] zookeeper.ZKUtil(111): regionserver:46401-0x101436d9a3c0002, quorum=127.0.0.1:52242, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7948fca2832,46401,1731756926229 2024-11-16T11:35:26,316 WARN [RS:1;a7948fca2832:46401 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T11:35:26,316 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7948fca2832,46401,1731756926229] 2024-11-16T11:35:26,316 INFO [RS:1;a7948fca2832:46401 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:35:26,316 DEBUG [RS:1;a7948fca2832:46401 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229 2024-11-16T11:35:26,320 INFO [RS:1;a7948fca2832:46401 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T11:35:26,323 INFO [RS:1;a7948fca2832:46401 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T11:35:26,323 INFO [RS:1;a7948fca2832:46401 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T11:35:26,324 INFO [RS:1;a7948fca2832:46401 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
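
At this point the second region server (port 46401) has reported for duty and ServerManager has registered it, so the master now tracks two live servers. A sketch of how a client could confirm that via ClusterMetrics; the server names in the comment are taken from this log and are only illustrative:

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WhoIsInTheCluster {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                System.out.println("active master: " + metrics.getMasterName());
                // After the registration above this map would have two entries,
                // e.g. a7948fca2832,40843,... and a7948fca2832,46401,...
                for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
                    System.out.println("live regionserver: " + rs);
                }
            }
        }
    }
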
2024-11-16T11:35:26,324 INFO [RS:1;a7948fca2832:46401 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T11:35:26,325 INFO [RS:1;a7948fca2832:46401 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T11:35:26,325 INFO [RS:1;a7948fca2832:46401 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:26,325 DEBUG [RS:1;a7948fca2832:46401 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:26,325 DEBUG [RS:1;a7948fca2832:46401 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:26,325 DEBUG [RS:1;a7948fca2832:46401 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:26,325 DEBUG [RS:1;a7948fca2832:46401 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:26,325 DEBUG [RS:1;a7948fca2832:46401 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:26,325 DEBUG [RS:1;a7948fca2832:46401 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:35:26,325 DEBUG [RS:1;a7948fca2832:46401 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:26,325 DEBUG [RS:1;a7948fca2832:46401 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:26,325 DEBUG [RS:1;a7948fca2832:46401 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:26,326 DEBUG [RS:1;a7948fca2832:46401 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:26,326 DEBUG [RS:1;a7948fca2832:46401 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:26,326 DEBUG [RS:1;a7948fca2832:46401 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:35:26,326 DEBUG [RS:1;a7948fca2832:46401 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:35:26,326 DEBUG [RS:1;a7948fca2832:46401 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:35:26,326 INFO [RS:1;a7948fca2832:46401 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-16T11:35:26,326 INFO [RS:1;a7948fca2832:46401 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:26,326 INFO [RS:1;a7948fca2832:46401 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:26,326 INFO [RS:1;a7948fca2832:46401 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:26,326 INFO [RS:1;a7948fca2832:46401 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:26,326 INFO [RS:1;a7948fca2832:46401 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,46401,1731756926229-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T11:35:26,341 INFO [RS:1;a7948fca2832:46401 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T11:35:26,342 INFO [RS:1;a7948fca2832:46401 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,46401,1731756926229-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:26,342 INFO [RS:1;a7948fca2832:46401 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:26,342 INFO [RS:1;a7948fca2832:46401 {}] regionserver.Replication(171): a7948fca2832,46401,1731756926229 started 2024-11-16T11:35:26,354 INFO [RS:1;a7948fca2832:46401 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:35:26,354 INFO [RS:1;a7948fca2832:46401 {}] regionserver.HRegionServer(1482): Serving as a7948fca2832,46401,1731756926229, RpcServer on a7948fca2832/172.17.0.2:46401, sessionid=0x101436d9a3c0002 2024-11-16T11:35:26,354 DEBUG [RS:1;a7948fca2832:46401 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T11:35:26,354 DEBUG [RS:1;a7948fca2832:46401 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7948fca2832,46401,1731756926229 2024-11-16T11:35:26,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;a7948fca2832:46401,5,FailOnTimeoutGroup] 2024-11-16T11:35:26,355 DEBUG [RS:1;a7948fca2832:46401 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,46401,1731756926229' 2024-11-16T11:35:26,355 DEBUG [RS:1;a7948fca2832:46401 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T11:35:26,355 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-16T11:35:26,355 DEBUG [RS:1;a7948fca2832:46401 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T11:35:26,355 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T11:35:26,356 DEBUG [RS:1;a7948fca2832:46401 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T11:35:26,356 DEBUG [RS:1;a7948fca2832:46401 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T11:35:26,356 DEBUG [RS:1;a7948fca2832:46401 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
a7948fca2832,46401,1731756926229 2024-11-16T11:35:26,356 DEBUG [RS:1;a7948fca2832:46401 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,46401,1731756926229' 2024-11-16T11:35:26,356 DEBUG [RS:1;a7948fca2832:46401 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T11:35:26,356 DEBUG [RS:1;a7948fca2832:46401 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T11:35:26,356 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is a7948fca2832,42791,1731756924581 2024-11-16T11:35:26,356 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2a7612cc 2024-11-16T11:35:26,357 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T11:35:26,357 DEBUG [RS:1;a7948fca2832:46401 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T11:35:26,357 INFO [RS:1;a7948fca2832:46401 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T11:35:26,357 INFO [RS:1;a7948fca2832:46401 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T11:35:26,358 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41022, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T11:35:26,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42791 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T11:35:26,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42791 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-16T11:35:26,359 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42791 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T11:35:26,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42791 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T11:35:26,362 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T11:35:26,362 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:26,362 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42791 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-16T11:35:26,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42791 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T11:35:26,363 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T11:35:26,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43447 is added to blk_1073741835_1011 (size=393) 2024-11-16T11:35:26,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42905 is added to blk_1073741835_1011 (size=393) 2024-11-16T11:35:26,376 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7550ed67a678c82a6324e4ade595ce68, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b 2024-11-16T11:35:26,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43447 is added to blk_1073741836_1012 (size=76) 2024-11-16T11:35:26,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42905 is added to blk_1073741836_1012 (size=76) 2024-11-16T11:35:26,383 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:35:26,383 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 7550ed67a678c82a6324e4ade595ce68, disabling compactions & flushes 2024-11-16T11:35:26,383 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. 2024-11-16T11:35:26,383 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. 2024-11-16T11:35:26,383 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. after waiting 0 ms 2024-11-16T11:35:26,383 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. 2024-11-16T11:35:26,383 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. 2024-11-16T11:35:26,383 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7550ed67a678c82a6324e4ade595ce68: Waiting for close lock at 1731756926383Disabling compacts and flushes for region at 1731756926383Disabling writes for close at 1731756926383Writing region close event to WAL at 1731756926383Closed at 1731756926383 2024-11-16T11:35:26,384 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T11:35:26,385 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731756926384"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731756926384"}]},"ts":"1731756926384"} 2024-11-16T11:35:26,387 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
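
The CreateTableProcedure above builds 'TestLogRolling-testLogRollOnDatanodeDeath' with a single 'info' family, and the earlier TableDescriptorChecker warnings show the test is running with a deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192). Whether the test sets those on the descriptor or in the configuration is not visible here; as an illustrative sketch only, setting them on the descriptor through the public Admin API would look like this and would trigger the same warnings:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTestTable {
        public static void main(String[] args) throws Exception {
            TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
            TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                    // Tiny limits, mirroring the values TableDescriptorChecker warns about.
                    .setMaxFileSize(786432L)          // MAX_FILESIZE
                    .setMemStoreFlushSize(8192L)      // MEMSTORE_FLUSHSIZE
                    .build();
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.createTable(td);   // drives a CreateTableProcedure like the one above
            }
        }
    }
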
2024-11-16T11:35:26,388 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T11:35:26,389 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731756926388"}]},"ts":"1731756926388"} 2024-11-16T11:35:26,391 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-16T11:35:26,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7550ed67a678c82a6324e4ade595ce68, ASSIGN}] 2024-11-16T11:35:26,393 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7550ed67a678c82a6324e4ade595ce68, ASSIGN 2024-11-16T11:35:26,394 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7550ed67a678c82a6324e4ade595ce68, ASSIGN; state=OFFLINE, location=a7948fca2832,40843,1731756924773; forceNewPlan=false, retain=false 2024-11-16T11:35:26,461 INFO [RS:1;a7948fca2832:46401 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C46401%2C1731756926229, suffix=, logDir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229, archiveDir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/oldWALs, maxLogs=32 2024-11-16T11:35:26,463 INFO [RS:1;a7948fca2832:46401 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C46401%2C1731756926229.1731756926462 2024-11-16T11:35:26,471 INFO [RS:1;a7948fca2832:46401 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 2024-11-16T11:35:26,473 DEBUG [RS:1;a7948fca2832:46401 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42499:42499),(127.0.0.1/127.0.0.1:46733:46733)] 2024-11-16T11:35:26,545 INFO [a7948fca2832:42791 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
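
The new WAL for the second region server reports "blocksize=256 MB, rollsize=128 MB": the roll size is the WAL block size scaled by the log-roll multiplier (0.5 in recent versions), so 256 MB x 0.5 = 128 MB. A small sketch of that arithmetic using the standard configuration keys; the 256 MB fallback below is just the value this particular log reports, not a universal default:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSize {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumption for illustration: if hbase.regionserver.hlog.blocksize is unset,
            // HBase derives the WAL block size from the underlying filesystem; 256 MB is
            // what this mini cluster ended up with.
            long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5F);
            // rollsize = blocksize * multiplier  ->  256 MB * 0.5 = 128 MB, matching the log.
            long rollSize = (long) (blockSize * multiplier);
            System.out.println("WAL rollsize = " + rollSize + " bytes");
        }
    }
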
2024-11-16T11:35:26,545 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7550ed67a678c82a6324e4ade595ce68, regionState=OPENING, regionLocation=a7948fca2832,40843,1731756924773 2024-11-16T11:35:26,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7550ed67a678c82a6324e4ade595ce68, ASSIGN because future has completed 2024-11-16T11:35:26,550 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7550ed67a678c82a6324e4ade595ce68, server=a7948fca2832,40843,1731756924773}] 2024-11-16T11:35:26,708 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. 2024-11-16T11:35:26,709 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7550ed67a678c82a6324e4ade595ce68, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:35:26,709 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:26,709 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:35:26,709 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:26,709 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:26,711 INFO [StoreOpener-7550ed67a678c82a6324e4ade595ce68-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:26,713 INFO [StoreOpener-7550ed67a678c82a6324e4ade595ce68-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7550ed67a678c82a6324e4ade595ce68 columnFamilyName info 2024-11-16T11:35:26,713 DEBUG [StoreOpener-7550ed67a678c82a6324e4ade595ce68-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:35:26,713 INFO [StoreOpener-7550ed67a678c82a6324e4ade595ce68-1 {}] regionserver.HStore(327): Store=7550ed67a678c82a6324e4ade595ce68/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:35:26,713 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:26,714 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:26,715 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:26,715 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:26,715 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:26,717 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:26,719 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:35:26,720 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7550ed67a678c82a6324e4ade595ce68; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=712325, jitterRate=-0.09423238039016724}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T11:35:26,720 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:26,721 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7550ed67a678c82a6324e4ade595ce68: Running coprocessor pre-open hook at 1731756926709Writing region info on filesystem at 1731756926710 (+1 ms)Initializing all the Stores at 1731756926711 (+1 
ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756926711Cleaning up temporary data from old regions at 1731756926715 (+4 ms)Running coprocessor post-open hooks at 1731756926720 (+5 ms)Region opened successfully at 1731756926720 2024-11-16T11:35:26,722 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68., pid=6, masterSystemTime=1731756926704 2024-11-16T11:35:26,725 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. 2024-11-16T11:35:26,725 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. 2024-11-16T11:35:26,726 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7550ed67a678c82a6324e4ade595ce68, regionState=OPEN, openSeqNum=2, regionLocation=a7948fca2832,40843,1731756924773 2024-11-16T11:35:26,728 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7550ed67a678c82a6324e4ade595ce68, server=a7948fca2832,40843,1731756924773 because future has completed 2024-11-16T11:35:26,732 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T11:35:26,733 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7550ed67a678c82a6324e4ade595ce68, server=a7948fca2832,40843,1731756924773 in 180 msec 2024-11-16T11:35:26,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T11:35:26,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7550ed67a678c82a6324e4ade595ce68, ASSIGN in 342 msec 2024-11-16T11:35:26,736 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T11:35:26,736 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731756926736"}]},"ts":"1731756926736"} 2024-11-16T11:35:26,738 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-16T11:35:26,740 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T11:35:26,742 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 381 msec 2024-11-16T11:35:31,516 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T11:35:31,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:35:31,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:35:31,544 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:35:31,544 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:35:31,552 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T11:35:31,552 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T11:35:31,552 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T11:35:31,552 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-16T11:35:31,553 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T11:35:31,553 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T11:35:31,553 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-16T11:35:36,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42791 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T11:35:36,446 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-16T11:35:36,446 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-16T11:35:36,450 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T11:35:36,450 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. 2024-11-16T11:35:36,466 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:35:36,476 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:35:36,477 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:35:36,477 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:35:36,477 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T11:35:36,478 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18ce565e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:35:36,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77f120ea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:35:36,584 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@458915b3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/java.io.tmpdir/jetty-localhost-39447-hadoop-hdfs-3_4_1-tests_jar-_-any-5554581464048647256/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:36,585 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@481fa549{HTTP/1.1, (http/1.1)}{localhost:39447} 2024-11-16T11:35:36,585 INFO [Time-limited test {}] server.Server(415): Started @121807ms 2024-11-16T11:35:36,586 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:35:36,635 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:35:36,640 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:35:36,646 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:35:36,646 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:35:36,646 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:35:36,647 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@766d120{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:35:36,647 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c5281d3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:35:36,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b31c6d1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/java.io.tmpdir/jetty-localhost-42383-hadoop-hdfs-3_4_1-tests_jar-_-any-8403019390123941275/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:36,754 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2da63af0{HTTP/1.1, (http/1.1)}{localhost:42383} 2024-11-16T11:35:36,754 INFO [Time-limited test {}] server.Server(415): Started @121976ms 2024-11-16T11:35:36,755 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:35:36,817 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:35:36,820 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:35:36,821 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:35:36,821 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:35:36,821 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T11:35:36,822 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7719cd31{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:35:36,822 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36b71b65{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:35:36,935 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@739cb936{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/java.io.tmpdir/jetty-localhost-38581-hadoop-hdfs-3_4_1-tests_jar-_-any-6054707484499035687/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:36,936 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f39d55e{HTTP/1.1, (http/1.1)}{localhost:38581} 2024-11-16T11:35:36,936 INFO [Time-limited test {}] server.Server(415): Started @122158ms 2024-11-16T11:35:36,937 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:35:37,746 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data6/current/BP-222813829-172.17.0.2-1731756922121/current, will proceed with Du for space computation calculation, 2024-11-16T11:35:37,746 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data5/current/BP-222813829-172.17.0.2-1731756922121/current, will proceed with Du for space computation calculation, 2024-11-16T11:35:37,769 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:35:37,772 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6d646661b810ce54 with lease ID 0x839e6fff8380021b: Processing first storage report for DS-291144d9-44fd-4748-b55f-5ae5897bdb04 from datanode DatanodeRegistration(127.0.0.1:44795, datanodeUuid=7bf661b8-2dea-4d21-909e-b23af81fa075, infoPort=36461, infoSecurePort=0, ipcPort=37363, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121) 2024-11-16T11:35:37,772 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6d646661b810ce54 with lease ID 0x839e6fff8380021b: from storage DS-291144d9-44fd-4748-b55f-5ae5897bdb04 node DatanodeRegistration(127.0.0.1:44795, datanodeUuid=7bf661b8-2dea-4d21-909e-b23af81fa075, infoPort=36461, infoSecurePort=0, ipcPort=37363, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:35:37,773 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6d646661b810ce54 with lease ID 0x839e6fff8380021b: Processing first storage report for DS-0ad1cf66-890d-4777-a89a-19fe94fd7bfe from datanode DatanodeRegistration(127.0.0.1:44795, datanodeUuid=7bf661b8-2dea-4d21-909e-b23af81fa075, infoPort=36461, infoSecurePort=0, ipcPort=37363, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121) 2024-11-16T11:35:37,773 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6d646661b810ce54 with lease ID 0x839e6fff8380021b: from storage DS-0ad1cf66-890d-4777-a89a-19fe94fd7bfe node DatanodeRegistration(127.0.0.1:44795, datanodeUuid=7bf661b8-2dea-4d21-909e-b23af81fa075, infoPort=36461, infoSecurePort=0, ipcPort=37363, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:35:38,137 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data8/current/BP-222813829-172.17.0.2-1731756922121/current, will proceed with Du for space computation calculation, 2024-11-16T11:35:38,137 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data7/current/BP-222813829-172.17.0.2-1731756922121/current, will proceed with Du for space computation calculation, 2024-11-16T11:35:38,159 WARN [Thread-831 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:35:38,162 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5933b1e9dc4997c6 with lease ID 0x839e6fff8380021c: Processing first storage report for DS-abad147b-6581-4451-9827-ec9aadfc1352 from datanode DatanodeRegistration(127.0.0.1:37083, datanodeUuid=0c7b5e84-7aa8-44ea-9459-a0502394f89f, infoPort=45383, infoSecurePort=0, ipcPort=36561, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121) 2024-11-16T11:35:38,162 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5933b1e9dc4997c6 with lease ID 0x839e6fff8380021c: from storage DS-abad147b-6581-4451-9827-ec9aadfc1352 node DatanodeRegistration(127.0.0.1:37083, datanodeUuid=0c7b5e84-7aa8-44ea-9459-a0502394f89f, infoPort=45383, infoSecurePort=0, ipcPort=36561, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:35:38,162 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5933b1e9dc4997c6 with lease ID 0x839e6fff8380021c: Processing first storage report for DS-f6817f8b-18e9-40aa-9403-eeb8a7a2cbb5 from datanode DatanodeRegistration(127.0.0.1:37083, datanodeUuid=0c7b5e84-7aa8-44ea-9459-a0502394f89f, infoPort=45383, infoSecurePort=0, ipcPort=36561, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121) 2024-11-16T11:35:38,162 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5933b1e9dc4997c6 with lease ID 0x839e6fff8380021c: from storage DS-f6817f8b-18e9-40aa-9403-eeb8a7a2cbb5 node DatanodeRegistration(127.0.0.1:37083, datanodeUuid=0c7b5e84-7aa8-44ea-9459-a0502394f89f, infoPort=45383, infoSecurePort=0, ipcPort=36561, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:35:38,253 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data9/current/BP-222813829-172.17.0.2-1731756922121/current, will proceed with Du for space computation calculation, 2024-11-16T11:35:38,253 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data10/current/BP-222813829-172.17.0.2-1731756922121/current, will proceed with Du for space computation calculation, 2024-11-16T11:35:38,270 WARN [Thread-853 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:35:38,273 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1606b528b9d62c28 with lease ID 0x839e6fff8380021d: Processing first storage report for DS-24b14bd7-d130-4510-a902-d7aee8c194a0 from datanode DatanodeRegistration(127.0.0.1:33925, datanodeUuid=e90eba1d-613c-42bc-9a70-1d6b287fd77c, infoPort=37035, infoSecurePort=0, ipcPort=33223, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121) 2024-11-16T11:35:38,273 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1606b528b9d62c28 with lease ID 0x839e6fff8380021d: from storage DS-24b14bd7-d130-4510-a902-d7aee8c194a0 node DatanodeRegistration(127.0.0.1:33925, datanodeUuid=e90eba1d-613c-42bc-9a70-1d6b287fd77c, infoPort=37035, infoSecurePort=0, ipcPort=33223, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T11:35:38,273 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1606b528b9d62c28 with lease ID 0x839e6fff8380021d: Processing first storage report for DS-ce8e403d-f74d-41d3-8c87-b99c339bfef1 from datanode DatanodeRegistration(127.0.0.1:33925, datanodeUuid=e90eba1d-613c-42bc-9a70-1d6b287fd77c, infoPort=37035, infoSecurePort=0, ipcPort=33223, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121) 2024-11-16T11:35:38,273 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1606b528b9d62c28 with lease ID 0x839e6fff8380021d: from storage DS-ce8e403d-f74d-41d3-8c87-b99c339bfef1 node DatanodeRegistration(127.0.0.1:33925, datanodeUuid=e90eba1d-613c-42bc-9a70-1d6b287fd77c, infoPort=37035, infoSecurePort=0, ipcPort=33223, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:35:38,275 WARN [ResponseProcessor for block BP-222813829-172.17.0.2-1731756922121:blk_1073741832_1008 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-222813829-172.17.0.2-1731756922121:blk_1073741832_1008 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:38,275 WARN [ResponseProcessor for block BP-222813829-172.17.0.2-1731756922121:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-222813829-172.17.0.2-1731756922121:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-222813829-172.17.0.2-1731756922121:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
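The ResponseProcessor warnings just above are the HDFS client noticing that datanode 127.0.0.1:42905, a member of the still-open WAL block pipelines, has gone away. A minimal sketch of provoking the same failure mode outside HBase, assuming only MiniDFSCluster from the hadoop-hdfs tests jar already referenced in this log (the path, sizes and replication below are illustrative, not taken from this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathDuringWrite {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Three datanodes so the write pipeline has somewhere to go after one dies.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      Path file = new Path("/test/wal-like-file");      // illustrative path
      try (FSDataOutputStream out = fs.create(file, (short) 2)) {
        out.write(new byte[1024]);
        out.hflush();                                    // data now sits in a 2-node pipeline
        cluster.stopDataNode(0);                         // kill a datanode while the file is open
        out.write(new byte[1024]);
        out.hflush();                                    // if node 0 was in the pipeline, this drives
      }                                                  // the ResponseProcessor error + recovery path
    } finally {
      cluster.shutdown();
    }
  }
}

Whether the stopped node is actually in this file's pipeline depends on block placement, so a real test would usually pick the victim from the stream's own pipeline rather than by index.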
2024-11-16T11:35:38,275 WARN [ResponseProcessor for block BP-222813829-172.17.0.2-1731756922121:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-222813829-172.17.0.2-1731756922121:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-222813829-172.17.0.2-1731756922121:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:38,276 WARN [DataStreamer for file /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 block BP-222813829-172.17.0.2-1731756922121:blk_1073741832_1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741832_1008 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK], DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]) is bad. 2024-11-16T11:35:38,276 WARN [DataStreamer for file /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta block BP-222813829-172.17.0.2-1731756922121:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK], DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]) is bad. 2024-11-16T11:35:38,276 WARN [PacketResponder: BP-222813829-172.17.0.2-1731756922121:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42905] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:38,276 WARN [PacketResponder: BP-222813829-172.17.0.2-1731756922121:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42905] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:38,276 WARN [ResponseProcessor for block BP-222813829-172.17.0.2-1731756922121:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-222813829-172.17.0.2-1731756922121:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-222813829-172.17.0.2-1731756922121:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:38,276 WARN [DataStreamer for file /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 block BP-222813829-172.17.0.2-1731756922121:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK], DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]) is bad. 
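The error-recovery entries above all name region-server WAL files under /user/jenkins/test-data/.../WALs/. What fills those WALs is nothing more exotic than ordinary client puts against the test table; a hedged sketch using the stock HBase client API (the column qualifier and value are invented for illustration, while the row key 'row0002' is the one this log locates a few entries later):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteThroughWal {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))) {
      // Each put is appended to the hosting region server's WAL before it is acknowledged,
      // which is why a dead datanode under the WAL block surfaces as the client-visible errors above.
      Put put = new Put(Bytes.toBytes("row0002"));                                  // row key seen in this log
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v")); // qualifier/value invented
      table.put(put);
    }
  }
}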
2024-11-16T11:35:38,276 WARN [DataStreamer for file /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/WALs/a7948fca2832,42791,1731756924581/a7948fca2832%2C42791%2C1731756924581.1731756924917 block BP-222813829-172.17.0.2-1731756922121:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK], DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]) is bad. 2024-11-16T11:35:38,276 WARN [PacketResponder: BP-222813829-172.17.0.2-1731756922121:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42905] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:38,277 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:49512 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:43447:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49512 dst: /127.0.0.1:43447 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:38,276 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:58004 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:42905:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58004 dst: /127.0.0.1:42905 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T11:35:38,277 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-11295015_22 at /127.0.0.1:49558 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:43447:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49558 dst: /127.0.0.1:43447 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:38,278 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1120756260_22 at /127.0.0.1:49504 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43447:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49504 dst: /127.0.0.1:43447 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T11:35:38,278 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:58026 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42905:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58026 dst: /127.0.0.1:42905 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:38,277 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1120756260_22 at /127.0.0.1:57984 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42905:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57984 dst: /127.0.0.1:42905 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:38,278 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:49534 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43447:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49534 dst: /127.0.0.1:43447 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T11:35:38,278 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-11295015_22 at /127.0.0.1:58052 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:42905:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58052 dst: /127.0.0.1:42905 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T11:35:38,285 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78b7f97a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:38,285 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e4b1502{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:35:38,286 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:35:38,286 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@129c308c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:35:38,286 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37edf9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir/,STOPPED} 2024-11-16T11:35:38,287 WARN [BP-222813829-172.17.0.2-1731756922121 heartbeating to localhost/127.0.0.1:39669 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:35:38,287 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T11:35:38,287 WARN [BP-222813829-172.17.0.2-1731756922121 heartbeating to localhost/127.0.0.1:39669 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-222813829-172.17.0.2-1731756922121 (Datanode Uuid 737f46b5-4f75-46d5-bb6d-e4740cb99e10) service to localhost/127.0.0.1:39669 2024-11-16T11:35:38,287 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:35:38,288 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data3/current/BP-222813829-172.17.0.2-1731756922121 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:38,288 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data4/current/BP-222813829-172.17.0.2-1731756922121 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:38,288 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:35:38,289 WARN [DataStreamer for file /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 block BP-222813829-172.17.0.2-1731756922121:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] 
at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:38,294 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@7200d34e {}] datanode.DataXceiver(331): 127.0.0.1:43447:DataXceiver error processing unknown operation src: /127.0.0.1:59000 dst: /127.0.0.1:43447 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:38,295 WARN [DataStreamer for file /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/WALs/a7948fca2832,42791,1731756924581/a7948fca2832%2C42791%2C1731756924581.1731756924917 block BP-222813829-172.17.0.2-1731756922121:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:38,302 WARN [DataStreamer for file /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta block BP-222813829-172.17.0.2-1731756922121:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:38,302 WARN [DataStreamer for file /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 block BP-222813829-172.17.0.2-1731756922121:blk_1073741832_1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741832_1008 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
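The createBlockOutputStream failures above ("Connection reset", "Connection refused") come from the client rebuilding each WAL pipeline after 127.0.0.1:42905 disappeared. How the DFS client reacts when a pipeline member fails is governed by the standard replace-datanode-on-failure settings; the snippet below only illustrates those knobs and is an assumption about typical client configuration, not something this test is shown setting:

import org.apache.hadoop.conf.Configuration;

public class PipelineRecoverySettings {
  public static Configuration clientConf() {
    Configuration conf = new Configuration();
    // Allow the client to ask the namenode for a replacement datanode during pipeline recovery...
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // ...and keep writing on the surviving nodes instead of failing outright if no replacement is found.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}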
2024-11-16T11:35:38,306 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@50936d12{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:38,306 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@59304df5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:35:38,306 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:35:38,307 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29d3f1c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:35:38,307 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7150e922{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir/,STOPPED} 2024-11-16T11:35:38,308 WARN [BP-222813829-172.17.0.2-1731756922121 heartbeating to localhost/127.0.0.1:39669 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:35:38,308 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T11:35:38,308 WARN [BP-222813829-172.17.0.2-1731756922121 heartbeating to localhost/127.0.0.1:39669 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-222813829-172.17.0.2-1731756922121 (Datanode Uuid 69afaec0-a137-45bb-899a-2d2b75843e0b) service to localhost/127.0.0.1:39669 2024-11-16T11:35:38,308 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:35:38,309 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data1/current/BP-222813829-172.17.0.2-1731756922121 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:38,309 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data2/current/BP-222813829-172.17.0.2-1731756922121 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:38,309 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:35:38,314 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68., hostname=a7948fca2832,40843,1731756924773, seqNum=2] 2024-11-16T11:35:38,316 ERROR [FSHLog-0-hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b-prefix:a7948fca2832,40843,1731756924773 {}] wal.AbstractFSWAL(1838): appendAndSync throws 
IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:38,316 WARN [FSHLog-0-hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b-prefix:a7948fca2832,40843,1731756924773 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:38,317 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:38,317 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C40843%2C1731756924773:(num 1731756925419) roll requested 2024-11-16T11:35:38,317 INFO [regionserver/a7948fca2832:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C40843%2C1731756924773.1731756938317 2024-11-16T11:35:38,327 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:38,334 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:38,334 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:38,334 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:38,334 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:38,334 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:38,334 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756938317 2024-11-16T11:35:38,335 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:38,335 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
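[Editor's aside, not part of the captured log] The repeated "All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,...]] are bad. Aborting..." entries above come from the HDFS client's DataStreamer once pipeline recovery has no healthy datanode left for the WAL block. Purely as an illustrative sketch (the class name and chosen values are assumptions, not taken from this test), the client-side behaviour on pipeline datanode failure is influenced by the replace-datanode-on-failure settings in the DFS client Configuration:

import org.apache.hadoop.conf.Configuration;

public class PipelineRecoverySettings {
  // Minimal sketch of a client Configuration tuned for datanode-failure handling.
  public static Configuration clientConf() {
    Configuration conf = new Configuration();
    // Ask the client to try replacing a failed datanode in the write pipeline.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Prefer continuing on the remaining datanodes instead of failing the write
    // when no replacement datanode can be found.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}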
2024-11-16T11:35:38,336 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-16T11:35:38,337 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-16T11:35:38,337 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 2024-11-16T11:35:38,340 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37035:37035),(127.0.0.1/127.0.0.1:36461:36461)] 2024-11-16T11:35:38,340 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 is not closed yet, will try archiving it next time 2024-11-16T11:35:38,340 WARN [IPC Server handler 0 on default port 39669 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741832_1008 2024-11-16T11:35:38,344 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 after 5ms 2024-11-16T11:35:38,895 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:40,327 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
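[Editor's aside, not part of the captured log] The RecoverLeaseFSUtils entries above ("set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()", "Recover lease on dfs file ...", "Failed to recover lease, attempt=0 ... after 5ms") show the writer taking over the HDFS lease on the old WAL before it can be closed and archived. A minimal sketch, not taken from the test code, of driving the same recoverLease/isFileClosed loop against an arbitrary path; the class name, the path argument, and the 1-second retry interval are assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LeaseRecoverable;
import org.apache.hadoop.fs.Path;

public class RecoverWalLease {
  public static void main(String[] args) throws Exception {
    Path wal = new Path(args[0]);                    // placeholder for a WAL file under .../WALs/
    FileSystem fs = wal.getFileSystem(new Configuration());
    if (!(fs instanceof LeaseRecoverable)) {
      throw new IllegalStateException("filesystem does not support lease recovery: " + fs.getUri());
    }
    LeaseRecoverable lr = (LeaseRecoverable) fs;
    boolean closed = lr.recoverLease(wal);           // true if the file is already closed
    while (!closed) {
      Thread.sleep(1000L);                           // give the NameNode time to finish block recovery
      closed = lr.isFileClosed(wal);
    }
    System.out.println("lease recovered; file is closed: " + wal);
  }
}

As in the log, a first attempt can legitimately report "Lease recovery is in progress"; the loop simply polls until the NameNode reports the file closed.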
2024-11-16T11:35:40,340 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:40,345 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756938317 2024-11-16T11:35:40,346 WARN [ResponseProcessor for block BP-222813829-172.17.0.2-1731756922121:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-222813829-172.17.0.2-1731756922121:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:40,346 WARN [DataStreamer for file /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756938317 block BP-222813829-172.17.0.2-1731756922121:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK], DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]) is bad. 2024-11-16T11:35:40,347 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:49742 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:44795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49742 dst: /127.0.0.1:44795 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:40,347 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:39908 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33925:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39908 dst: /127.0.0.1:33925 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T11:35:40,372 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@739cb936{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:40,373 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f39d55e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:35:40,373 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:35:40,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36b71b65{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:35:40,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7719cd31{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir/,STOPPED} 2024-11-16T11:35:40,374 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T11:35:40,374 WARN [BP-222813829-172.17.0.2-1731756922121 heartbeating to localhost/127.0.0.1:39669 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:35:40,374 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:35:40,374 WARN [BP-222813829-172.17.0.2-1731756922121 heartbeating to localhost/127.0.0.1:39669 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-222813829-172.17.0.2-1731756922121 (Datanode Uuid e90eba1d-613c-42bc-9a70-1d6b287fd77c) service to localhost/127.0.0.1:39669 2024-11-16T11:35:40,375 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data9/current/BP-222813829-172.17.0.2-1731756922121 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:40,375 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data10/current/BP-222813829-172.17.0.2-1731756922121 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:40,375 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:35:40,895 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:42,328 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:42,341 WARN [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]] 2024-11-16T11:35:42,342 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:42,342 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C40843%2C1731756924773:(num 1731756938317) roll requested 2024-11-16T11:35:42,342 INFO [regionserver/a7948fca2832:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C40843%2C1731756924773.1731756942342 2024-11-16T11:35:42,345 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 after 4008ms 2024-11-16T11:35:42,346 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:42,346 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK], DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]) is bad. 2024-11-16T11:35:42,346 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741839_1021 2024-11-16T11:35:42,349 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK] 2024-11-16T11:35:42,353 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43447 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:42,353 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54840 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data8]'}, localName='127.0.0.1:37083', datanodeUuid='0c7b5e84-7aa8-44ea-9459-a0502394f89f', xmitsInProgress=0}:Exception transferring block BP-222813829-172.17.0.2-1731756922121:blk_1073741840_1022 to mirror 127.0.0.1:43447 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:42,353 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK], DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]) is bad. 2024-11-16T11:35:42,353 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741840_1022 2024-11-16T11:35:42,353 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54840 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T11:35:42,353 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54840 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:37083:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54840 dst: /127.0.0.1:37083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T11:35:42,354 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK] 2024-11-16T11:35:42,358 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:42,358 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:42,358 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:42,358 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:42,358 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:42,358 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756938317 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756942342 2024-11-16T11:35:42,359 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36461:36461),(127.0.0.1/127.0.0.1:45383:45383)] 2024-11-16T11:35:42,359 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 is not closed yet, will try archiving it next time 2024-11-16T11:35:42,359 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756938317 is not closed yet, will try archiving it next time 2024-11-16T11:35:42,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44795 is added to blk_1073741838_1020 (size=2431) 2024-11-16T11:35:42,381 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T11:35:42,761 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 is not closed yet, will try archiving it next time 2024-11-16T11:35:42,895 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:44,328 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:44,359 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:44,385 WARN [ResponseProcessor for block BP-222813829-172.17.0.2-1731756922121:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-222813829-172.17.0.2-1731756922121:blk_1073741841_1023 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:44,385 WARN [DataStreamer for file /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756942342 block BP-222813829-172.17.0.2-1731756922121:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 2024-11-16T11:35:44,386 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54854 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:37083:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54854 dst: /127.0.0.1:37083 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:44,386 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:47970 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:44795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47970 dst: /127.0.0.1:44795 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
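[Editor's aside, not part of the captured log] The "roll requested" / "Rolled WAL ... new WAL ..." entries above show the region server swapping its write-ahead log onto a healthier pipeline after the old one degraded. For reference only, the same kind of roll can also be requested from a client through the HBase Admin API; a minimal sketch in which the class name is an assumption and the server name, though copied from the log, should be treated as a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualWalRoll {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Server name format matches the log: host,port,startcode.
      ServerName rs = ServerName.valueOf("a7948fca2832,40843,1731756924773");
      admin.rollWALWriter(rs); // asks the region server to close the current WAL and open a new one
    }
  }
}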
2024-11-16T11:35:44,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@458915b3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:44,456 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@481fa549{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:35:44,457 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:35:44,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77f120ea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:35:44,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18ce565e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir/,STOPPED} 2024-11-16T11:35:44,458 WARN [BP-222813829-172.17.0.2-1731756922121 heartbeating to localhost/127.0.0.1:39669 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:35:44,458 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T11:35:44,458 WARN [BP-222813829-172.17.0.2-1731756922121 heartbeating to localhost/127.0.0.1:39669 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-222813829-172.17.0.2-1731756922121 (Datanode Uuid 7bf661b8-2dea-4d21-909e-b23af81fa075) service to localhost/127.0.0.1:39669 2024-11-16T11:35:44,458 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:35:44,459 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data5/current/BP-222813829-172.17.0.2-1731756922121 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:44,459 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data6/current/BP-222813829-172.17.0.2-1731756922121 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:35:44,460 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:35:44,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40843 {}] regionserver.HRegion(8855): Flush requested on 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:44,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7550ed67a678c82a6324e4ade595ce68 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T11:35:44,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/aee356af88fc4f0db9771da8e8246aaa is 1080, key is row0002/info:/1731756940377/Put/seqid=0 2024-11-16T11:35:44,492 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44795 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:44,492 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54862 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741842_1025] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data8]'}, localName='127.0.0.1:37083', datanodeUuid='0c7b5e84-7aa8-44ea-9459-a0502394f89f', xmitsInProgress=0}:Exception transferring block BP-222813829-172.17.0.2-1731756922121:blk_1073741842_1025 to mirror 127.0.0.1:44795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:44,492 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK], DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 
2024-11-16T11:35:44,492 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741842_1025 2024-11-16T11:35:44,492 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54862 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741842_1025] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T11:35:44,492 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54862 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:37083:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54862 dst: /127.0.0.1:37083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:44,493 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:35:44,494 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:44,495 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK], DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]) is bad. 
2024-11-16T11:35:44,495 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741843_1026 2024-11-16T11:35:44,495 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK] 2024-11-16T11:35:44,497 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:44,497 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK], DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]) is bad. 2024-11-16T11:35:44,497 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741844_1027 2024-11-16T11:35:44,498 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK] 2024-11-16T11:35:44,499 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T11:35:44,500 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]) is bad. 2024-11-16T11:35:44,500 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741845_1028 2024-11-16T11:35:44,501 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK] 2024-11-16T11:35:44,502 WARN [IPC Server handler 1 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T11:35:44,502 WARN [IPC Server handler 1 on default port 39669 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T11:35:44,502 WARN [IPC Server handler 1 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T11:35:44,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741846_1029 (size=10347) 2024-11-16T11:35:44,896 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
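[Editor's aside, not part of the captured log] The BlockPlacementPolicyDefault warnings above ("Failed to place enough replicas, still in need of 1 to reach 2") mean the NameNode could not find enough live datanodes to satisfy the requested replication for the new block, which is expected here since the test keeps killing datanodes. A small, hypothetical sketch of checking a file's requested replication and where its blocks actually landed; the class name is an assumption and the path argument is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicaCheck {
  public static void main(String[] args) throws Exception {
    Path p = new Path(args[0]); // placeholder, e.g. a flushed HFile path like the one in the log
    FileSystem fs = p.getFileSystem(new Configuration());
    FileStatus st = fs.getFileStatus(p);
    System.out.println("requested replication: " + st.getReplication());
    // List the hosts currently holding each block of the file.
    for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
      System.out.println("offset " + loc.getOffset() + " hosted on " + String.join(",", loc.getHosts()));
    }
  }
}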
2024-11-16T11:35:44,907 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/aee356af88fc4f0db9771da8e8246aaa 2024-11-16T11:35:44,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/aee356af88fc4f0db9771da8e8246aaa as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/aee356af88fc4f0db9771da8e8246aaa 2024-11-16T11:35:44,924 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/aee356af88fc4f0db9771da8e8246aaa, entries=5, sequenceid=11, filesize=10.1 K 2024-11-16T11:35:44,925 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 7550ed67a678c82a6324e4ade595ce68 in 455ms, sequenceid=11, compaction requested=false 2024-11-16T11:35:44,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7550ed67a678c82a6324e4ade595ce68: 2024-11-16T11:35:45,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40843 {}] regionserver.HRegion(8855): Flush requested on 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:45,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7550ed67a678c82a6324e4ade595ce68 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-16T11:35:45,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/96b07281870a40518483a92f9f0bd690 is 1080, key is row0007/info:/1731756944471/Put/seqid=0 2024-11-16T11:35:45,104 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43447 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
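[Editor's aside, not part of the captured log] The MemStoreFlusher entries above record a flush of ~7.36 KB of memstore data into a new HFile (entries=5, sequenceid=11) despite the degraded pipeline. The flush here is driven by the region server itself; for reference only, a client can also request a flush explicitly. A minimal hypothetical sketch, with the class name assumed and the table name copied from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualFlush {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush all memstores of the table; substitute your own table name as needed.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}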
2024-11-16T11:35:45,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54888 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data8]'}, localName='127.0.0.1:37083', datanodeUuid='0c7b5e84-7aa8-44ea-9459-a0502394f89f', xmitsInProgress=0}:Exception transferring block BP-222813829-172.17.0.2-1731756922121:blk_1073741847_1030 to mirror 127.0.0.1:43447 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:45,104 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK], DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]) is bad. 2024-11-16T11:35:45,104 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54888 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T11:35:45,104 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741847_1030 2024-11-16T11:35:45,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54888 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:37083:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54888 dst: /127.0.0.1:37083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:45,105 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK] 2024-11-16T11:35:45,107 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:45,107 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK], DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]) is bad. 2024-11-16T11:35:45,107 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741848_1031 2024-11-16T11:35:45,108 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK] 2024-11-16T11:35:45,111 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42905 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T11:35:45,111 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54890 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data8]'}, localName='127.0.0.1:37083', datanodeUuid='0c7b5e84-7aa8-44ea-9459-a0502394f89f', xmitsInProgress=0}:Exception transferring block BP-222813829-172.17.0.2-1731756922121:blk_1073741849_1032 to mirror 127.0.0.1:42905 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:45,111 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK], DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]) is bad. 2024-11-16T11:35:45,111 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741849_1032 2024-11-16T11:35:45,111 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54890 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T11:35:45,111 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54890 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:37083:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54890 dst: /127.0.0.1:37083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:45,112 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK] 2024-11-16T11:35:45,114 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44795 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:45,114 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54906 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data8]'}, localName='127.0.0.1:37083', datanodeUuid='0c7b5e84-7aa8-44ea-9459-a0502394f89f', xmitsInProgress=0}:Exception transferring block BP-222813829-172.17.0.2-1731756922121:blk_1073741850_1033 to mirror 127.0.0.1:44795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T11:35:45,114 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK], DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 2024-11-16T11:35:45,115 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741850_1033 2024-11-16T11:35:45,115 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54906 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T11:35:45,115 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54906 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:37083:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54906 dst: /127.0.0.1:37083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T11:35:45,115 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:35:45,116 WARN [IPC Server handler 1 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T11:35:45,116 WARN [IPC Server handler 1 on default port 39669 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T11:35:45,116 WARN [IPC Server handler 1 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T11:35:45,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741851_1034 (size=12506) 2024-11-16T11:35:45,523 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/96b07281870a40518483a92f9f0bd690 2024-11-16T11:35:45,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/96b07281870a40518483a92f9f0bd690 as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/96b07281870a40518483a92f9f0bd690 2024-11-16T11:35:45,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/96b07281870a40518483a92f9f0bd690, entries=7, sequenceid=24, filesize=12.2 K 2024-11-16T11:35:45,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 7550ed67a678c82a6324e4ade595ce68 in 441ms, sequenceid=24, compaction requested=false 2024-11-16T11:35:45,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7550ed67a678c82a6324e4ade595ce68: 2024-11-16T11:35:45,538 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-16T11:35:45,538 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:35:45,538 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/96b07281870a40518483a92f9f0bd690 because midkey is the same as first or last row 2024-11-16T11:35:46,329 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,360 WARN [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]] 2024-11-16T11:35:46,360 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,360 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C40843%2C1731756924773:(num 1731756942342) roll requested 2024-11-16T11:35:46,361 INFO [regionserver/a7948fca2832:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C40843%2C1731756924773.1731756946360 2024-11-16T11:35:46,363 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,364 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK], DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]) is bad. 2024-11-16T11:35:46,364 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741852_1035 2024-11-16T11:35:46,364 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK] 2024-11-16T11:35:46,366 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,366 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK], DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 2024-11-16T11:35:46,366 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741853_1036 2024-11-16T11:35:46,367 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:35:46,368 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,368 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK], DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]) is bad. 2024-11-16T11:35:46,369 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741854_1037 2024-11-16T11:35:46,369 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK] 2024-11-16T11:35:46,370 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,371 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]) is bad. 
2024-11-16T11:35:46,371 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741855_1038 2024-11-16T11:35:46,371 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK] 2024-11-16T11:35:46,372 WARN [IPC Server handler 0 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T11:35:46,372 WARN [IPC Server handler 0 on default port 39669 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T11:35:46,372 WARN [IPC Server handler 0 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T11:35:46,375 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:46,375 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:46,375 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:46,375 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:46,375 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:46,375 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756942342 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756946360 2024-11-16T11:35:46,376 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45383:45383)] 2024-11-16T11:35:46,376 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 is not closed yet, will try archiving it next time 2024-11-16T11:35:46,376 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756942342 is not closed yet, will try archiving it next time 2024-11-16T11:35:46,377 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756938317 to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/oldWALs/a7948fca2832%2C40843%2C1731756924773.1731756938317 2024-11-16T11:35:46,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741841_1024 (size=25992) 2024-11-16T11:35:46,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40843 {}] regionserver.HRegion(8855): Flush requested on 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:46,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7550ed67a678c82a6324e4ade595ce68 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T11:35:46,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/6cbf97a6ff4f4bc78d17edd1f94f4e42 is 1079, key is tmprow/info:/1731756946515/Put/seqid=0 2024-11-16T11:35:46,522 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,523 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK], DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]) is bad. 2024-11-16T11:35:46,523 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741857_1040 2024-11-16T11:35:46,523 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK] 2024-11-16T11:35:46,524 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,525 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]) is bad. 2024-11-16T11:35:46,525 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741858_1041 2024-11-16T11:35:46,525 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK] 2024-11-16T11:35:46,526 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,527 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK], DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 2024-11-16T11:35:46,527 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741859_1042 2024-11-16T11:35:46,527 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:35:46,529 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,529 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]) is bad. 2024-11-16T11:35:46,529 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741860_1043 2024-11-16T11:35:46,529 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK] 2024-11-16T11:35:46,530 WARN [IPC Server handler 3 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T11:35:46,530 WARN [IPC Server handler 3 on default port 39669 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T11:35:46,530 WARN [IPC Server handler 3 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T11:35:46,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741861_1044 (size=6027) 2024-11-16T11:35:46,778 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 is not closed yet, will try archiving it next time 2024-11-16T11:35:46,896 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/6cbf97a6ff4f4bc78d17edd1f94f4e42 2024-11-16T11:35:46,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/6cbf97a6ff4f4bc78d17edd1f94f4e42 as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/6cbf97a6ff4f4bc78d17edd1f94f4e42 2024-11-16T11:35:46,947 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/6cbf97a6ff4f4bc78d17edd1f94f4e42, entries=1, sequenceid=34, filesize=5.9 K 2024-11-16T11:35:46,948 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 7550ed67a678c82a6324e4ade595ce68 in 432ms, sequenceid=34, compaction requested=true 2024-11-16T11:35:46,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7550ed67a678c82a6324e4ade595ce68: 2024-11-16T11:35:46,948 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-16T11:35:46,948 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:35:46,948 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/96b07281870a40518483a92f9f0bd690 because midkey is the same as first or last row 2024-11-16T11:35:46,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7550ed67a678c82a6324e4ade595ce68:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T11:35:46,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:35:46,949 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-11-16T11:35:46,950 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T11:35:46,950 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HStore(1541): 7550ed67a678c82a6324e4ade595ce68/info is initiating minor compaction (all files) 2024-11-16T11:35:46,950 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7550ed67a678c82a6324e4ade595ce68/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. 2024-11-16T11:35:46,951 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/aee356af88fc4f0db9771da8e8246aaa, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/96b07281870a40518483a92f9f0bd690, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/6cbf97a6ff4f4bc78d17edd1f94f4e42] into tmpdir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp, totalSize=28.2 K 2024-11-16T11:35:46,951 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.Compactor(225): Compacting aee356af88fc4f0db9771da8e8246aaa, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731756940377 2024-11-16T11:35:46,951 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.Compactor(225): Compacting 96b07281870a40518483a92f9f0bd690, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731756944471 2024-11-16T11:35:46,952 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6cbf97a6ff4f4bc78d17edd1f94f4e42, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731756946515 2024-11-16T11:35:46,968 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7550ed67a678c82a6324e4ade595ce68#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:35:46,968 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/456575483ca5470ebc6957b4ebab6204 is 1080, key is row0002/info:/1731756940377/Put/seqid=0 2024-11-16T11:35:46,971 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,971 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 2024-11-16T11:35:46,971 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741862_1045 2024-11-16T11:35:46,972 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:35:46,973 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,973 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]) is bad. 2024-11-16T11:35:46,973 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741863_1046 2024-11-16T11:35:46,974 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK] 2024-11-16T11:35:46,975 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,975 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]) is bad. 2024-11-16T11:35:46,975 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741864_1047 2024-11-16T11:35:46,976 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK] 2024-11-16T11:35:46,977 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:46,977 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]) is bad. 
2024-11-16T11:35:46,977 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741865_1048 2024-11-16T11:35:46,978 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK] 2024-11-16T11:35:46,979 WARN [IPC Server handler 2 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T11:35:46,979 WARN [IPC Server handler 2 on default port 39669 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T11:35:46,979 WARN [IPC Server handler 2 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T11:35:46,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741866_1049 (size=17994) 2024-11-16T11:35:47,171 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@76daef56[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37083, datanodeUuid=0c7b5e84-7aa8-44ea-9459-a0502394f89f, infoPort=45383, infoSecurePort=0, ipcPort=36561, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741851_1034 to 127.0.0.1:43447 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
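The WARN sequence above is the HDFS client's write-pipeline recovery loop: each createBlockOutputStream attempt to a stopped datanode fails with Connection refused, DataStreamer abandons the block and excludes the dead node, and the NameNode's BlockPlacementPolicyDefault can then no longer find enough DISK storages to reach replication 2. The test's own code is not part of this log; as an illustrative sketch only, the Java snippet below shows the standard HDFS client settings that govern datanode replacement during such recovery, written against the ordinary FileSystem API. The path and payload are invented for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PipelineRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Standard HDFS client keys: on small clusters, insisting on a replacement
    // datanode for every pipeline failure can abort the write; best-effort lets
    // the stream continue on the surviving nodes instead.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

    FileSystem fs = FileSystem.get(conf);
    // Hypothetical path and payload, purely for illustration.
    try (FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-recovery-demo"))) {
      out.write("demo".getBytes("UTF-8"));
      // hflush pushes data through the current pipeline; a datanode dying here
      // produces the same Error Recovery / Abandoning / Excluding lines as above.
      out.hflush();
    }
  }
}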
2024-11-16T11:35:47,171 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3a7f417[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37083, datanodeUuid=0c7b5e84-7aa8-44ea-9459-a0502394f89f, infoPort=45383, infoSecurePort=0, ipcPort=36561, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741846_1029 to 127.0.0.1:44795 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:47,395 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/456575483ca5470ebc6957b4ebab6204 as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/456575483ca5470ebc6957b4ebab6204 2024-11-16T11:35:47,404 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7550ed67a678c82a6324e4ade595ce68/info of 7550ed67a678c82a6324e4ade595ce68 into 456575483ca5470ebc6957b4ebab6204(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-16T11:35:47,404 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7550ed67a678c82a6324e4ade595ce68: 2024-11-16T11:35:47,404 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68., storeName=7550ed67a678c82a6324e4ade595ce68/info, priority=13, startTime=1731756946948; duration=0sec 2024-11-16T11:35:47,404 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T11:35:47,404 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:35:47,404 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/456575483ca5470ebc6957b4ebab6204 because midkey is the same as first or last row 2024-11-16T11:35:47,405 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T11:35:47,405 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:35:47,405 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/456575483ca5470ebc6957b4ebab6204 because midkey is the same as first or last row 2024-11-16T11:35:47,405 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T11:35:47,405 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:35:47,405 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/456575483ca5470ebc6957b4ebab6204 because midkey is the same as first or last row 2024-11-16T11:35:47,405 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:35:47,405 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7550ed67a678c82a6324e4ade595ce68:info 2024-11-16T11:35:47,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40843 {}] regionserver.HRegion(8855): Flush requested on 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:47,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7550ed67a678c82a6324e4ade595ce68 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T11:35:47,944 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/5bd25917cdaa4215baae2d6be983455e is 1079, key is tmprow/info:/1731756947935/Put/seqid=0 2024-11-16T11:35:47,946 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:47,946 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]) is bad. 2024-11-16T11:35:47,946 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741867_1050 2024-11-16T11:35:47,947 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK] 2024-11-16T11:35:47,950 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44795 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T11:35:47,950 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54980 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data8]'}, localName='127.0.0.1:37083', datanodeUuid='0c7b5e84-7aa8-44ea-9459-a0502394f89f', xmitsInProgress=0}:Exception transferring block BP-222813829-172.17.0.2-1731756922121:blk_1073741868_1051 to mirror 127.0.0.1:44795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:47,950 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK], DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 2024-11-16T11:35:47,950 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741868_1051 2024-11-16T11:35:47,950 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54980 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T11:35:47,950 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54980 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:37083:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54980 dst: /127.0.0.1:37083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:47,951 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:35:47,954 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33925 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:47,954 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54990 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data8]'}, localName='127.0.0.1:37083', datanodeUuid='0c7b5e84-7aa8-44ea-9459-a0502394f89f', xmitsInProgress=0}:Exception transferring block BP-222813829-172.17.0.2-1731756922121:blk_1073741869_1052 to mirror 127.0.0.1:33925 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T11:35:47,955 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK], DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]) is bad. 2024-11-16T11:35:47,955 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741869_1052 2024-11-16T11:35:47,955 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54990 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T11:35:47,955 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54990 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:37083:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54990 dst: /127.0.0.1:37083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:47,955 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK] 2024-11-16T11:35:47,958 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43447 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T11:35:47,958 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54994 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data8]'}, localName='127.0.0.1:37083', datanodeUuid='0c7b5e84-7aa8-44ea-9459-a0502394f89f', xmitsInProgress=0}:Exception transferring block BP-222813829-172.17.0.2-1731756922121:blk_1073741870_1053 to mirror 127.0.0.1:43447 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:47,958 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK], DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]) is bad. 2024-11-16T11:35:47,958 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54994 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T11:35:47,958 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741870_1053 2024-11-16T11:35:47,958 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:54994 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:37083:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54994 dst: /127.0.0.1:37083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:47,959 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK] 2024-11-16T11:35:47,959 WARN [IPC Server handler 0 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T11:35:47,959 WARN [IPC Server handler 0 on default port 39669 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T11:35:47,959 WARN [IPC Server handler 0 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T11:35:47,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741871_1054 (size=6027) 2024-11-16T11:35:48,165 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3a7f417[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37083, datanodeUuid=0c7b5e84-7aa8-44ea-9459-a0502394f89f, infoPort=45383, infoSecurePort=0, ipcPort=36561, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741841_1024 to 127.0.0.1:33925 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:48,165 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@76daef56[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37083, datanodeUuid=0c7b5e84-7aa8-44ea-9459-a0502394f89f, infoPort=45383, infoSecurePort=0, ipcPort=36561, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741861_1044 to 127.0.0.1:42905 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:48,329 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:48,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/5bd25917cdaa4215baae2d6be983455e 2024-11-16T11:35:48,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/5bd25917cdaa4215baae2d6be983455e as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/5bd25917cdaa4215baae2d6be983455e 2024-11-16T11:35:48,377 WARN [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]] 2024-11-16T11:35:48,377 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:48,377 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C40843%2C1731756924773:(num 1731756946360) roll requested 2024-11-16T11:35:48,377 INFO [regionserver/a7948fca2832:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C40843%2C1731756924773.1731756948377 2024-11-16T11:35:48,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/5bd25917cdaa4215baae2d6be983455e, entries=1, sequenceid=45, filesize=5.9 K 2024-11-16T11:35:48,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 7550ed67a678c82a6324e4ade595ce68 in 443ms, sequenceid=45, compaction requested=false 2024-11-16T11:35:48,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7550ed67a678c82a6324e4ade595ce68: 2024-11-16T11:35:48,380 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-16T11:35:48,380 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:35:48,380 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/456575483ca5470ebc6957b4ebab6204 because midkey is the same as first or last row 2024-11-16T11:35:48,381 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43447 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:48,381 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:55002 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data8]'}, localName='127.0.0.1:37083', datanodeUuid='0c7b5e84-7aa8-44ea-9459-a0502394f89f', xmitsInProgress=0}:Exception transferring block BP-222813829-172.17.0.2-1731756922121:blk_1073741872_1055 to mirror 127.0.0.1:43447 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:48,381 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK], DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]) is bad. 2024-11-16T11:35:48,381 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741872_1055 2024-11-16T11:35:48,381 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:55002 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T11:35:48,381 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:55002 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:37083:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55002 dst: /127.0.0.1:37083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:48,382 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK] 2024-11-16T11:35:48,383 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:48,383 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]) is bad. 2024-11-16T11:35:48,383 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741873_1056 2024-11-16T11:35:48,384 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK] 2024-11-16T11:35:48,385 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:48,385 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 2024-11-16T11:35:48,385 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741874_1057 2024-11-16T11:35:48,385 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:35:48,387 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:48,387 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]) is bad. 
2024-11-16T11:35:48,387 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741875_1058 2024-11-16T11:35:48,387 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK] 2024-11-16T11:35:48,388 WARN [IPC Server handler 0 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T11:35:48,388 WARN [IPC Server handler 0 on default port 39669 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T11:35:48,388 WARN [IPC Server handler 0 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T11:35:48,390 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:48,390 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:48,390 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:48,390 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:48,390 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:48,391 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756946360 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756948377 2024-11-16T11:35:48,392 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45383:45383)] 2024-11-16T11:35:48,392 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 is not closed yet, will try archiving it next time 2024-11-16T11:35:48,392 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756946360 is not closed yet, will try archiving it next time 2024-11-16T11:35:48,392 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756942342 to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/oldWALs/a7948fca2832%2C40843%2C1731756924773.1731756942342 2024-11-16T11:35:48,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741856_1039 (size=13591) 2024-11-16T11:35:48,793 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 is not closed yet, will try archiving it next time 2024-11-16T11:35:48,896 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:49,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40843 {}] regionserver.HRegion(8855): Flush requested on 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:35:49,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7550ed67a678c82a6324e4ade595ce68 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T11:35:49,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/a17d8a4238b54bf2b7fd1f0034ba9bf2 is 1079, key is tmprow/info:/1731756949356/Put/seqid=0 2024-11-16T11:35:49,366 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
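Above, FSHLog reports that the WAL pipeline is down to 1 replica where at least 2 are expected, requests a roll, writes the new WAL ...1731756948377 on the one surviving datanode, and archives an older WAL file, after which the next memstore flush immediately hits the same Connection refused pipeline errors. The actual test code is not included in this log; as a generic, hypothetical sketch, a MiniDFSCluster-based test (MiniDFSCluster is the hadoop-hdfs test utility) can provoke this behaviour by stopping a datanode while a writer holds an open stream:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt("dfs.replication", 2);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      // Hypothetical file standing in for a WAL; keep the stream open across the failure.
      FSDataOutputStream out = fs.create(new Path("/sketch/wal.1"));
      out.write(new byte[1024]);
      out.hflush(); // pipeline is now established

      // Stop one datanode mid-write; later writes go through DataStreamer error
      // recovery (Abandoning / Excluding a datanode), as in the log entries above.
      cluster.stopDataNode(0);

      out.write(new byte[1024]);
      out.hflush();
      out.close();
    } finally {
      cluster.shutdown();
    }
  }
}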
2024-11-16T11:35:49,366 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK], DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 2024-11-16T11:35:49,367 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741877_1060 2024-11-16T11:35:49,367 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:35:49,369 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:49,369 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK], DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]) is bad. 2024-11-16T11:35:49,369 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741878_1061 2024-11-16T11:35:49,369 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK] 2024-11-16T11:35:49,371 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:49,371 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]) is bad. 2024-11-16T11:35:49,371 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741879_1062 2024-11-16T11:35:49,372 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK] 2024-11-16T11:35:49,374 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33925 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:49,374 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:55020 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data8]'}, localName='127.0.0.1:37083', datanodeUuid='0c7b5e84-7aa8-44ea-9459-a0502394f89f', xmitsInProgress=0}:Exception transferring block BP-222813829-172.17.0.2-1731756922121:blk_1073741880_1063 to mirror 127.0.0.1:33925 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:49,374 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK], DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]) is bad. 2024-11-16T11:35:49,374 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741880_1063 2024-11-16T11:35:49,374 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:55020 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T11:35:49,375 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523797818_22 at /127.0.0.1:55020 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:37083:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55020 dst: /127.0.0.1:37083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
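The repeated createBlockOutputStream failures and "Excluding datanode ..." entries above are the DFS client's pipeline error recovery: each refused connection drops one datanode from the write pipeline and, where possible, a replacement is requested from the NameNode. The sketch below is not this test's configuration; it only illustrates, using the stock hdfs-default.xml keys, the client-side settings that govern that behaviour (the best-effort flag in particular decides whether a write keeps going on the surviving nodes or eventually fails with "All datanodes ... are bad").

import org.apache.hadoop.conf.Configuration;

public final class PipelineRecoveryConfSketch {
    // Illustrative only: the client-side knobs behind DataStreamer's behaviour when a
    // pipeline datanode refuses connections, as in the WARN entries above.
    public static Configuration pipelineRecoveryConf() {
        Configuration conf = new Configuration();
        // Two replicas per block, matching "still in need of 1 to reach 2" further down.
        conf.setInt("dfs.replication", 2);
        // Ask the NameNode for a replacement datanode when one in the pipeline fails.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // With best-effort enabled the client keeps writing on the surviving datanodes
        // when no replacement can be found, instead of aborting the stream.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
    }
}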
2024-11-16T11:35:49,375 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK] 2024-11-16T11:35:49,376 WARN [IPC Server handler 0 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T11:35:49,376 WARN [IPC Server handler 0 on default port 39669 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T11:35:49,376 WARN [IPC Server handler 0 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T11:35:49,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741881_1064 (size=6027) 2024-11-16T11:35:49,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/a17d8a4238b54bf2b7fd1f0034ba9bf2 2024-11-16T11:35:49,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/a17d8a4238b54bf2b7fd1f0034ba9bf2 as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/a17d8a4238b54bf2b7fd1f0034ba9bf2 2024-11-16T11:35:49,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/a17d8a4238b54bf2b7fd1f0034ba9bf2, entries=1, sequenceid=55, filesize=5.9 K 2024-11-16T11:35:49,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 7550ed67a678c82a6324e4ade595ce68 in 436ms, sequenceid=55, compaction requested=true 2024-11-16T11:35:49,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7550ed67a678c82a6324e4ade595ce68: 2024-11-16T11:35:49,794 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-16T11:35:49,794 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:35:49,794 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/456575483ca5470ebc6957b4ebab6204 because midkey is the same as first or last row 2024-11-16T11:35:49,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7550ed67a678c82a6324e4ade595ce68:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T11:35:49,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:35:49,795 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T11:35:49,796 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T11:35:49,796 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HStore(1541): 7550ed67a678c82a6324e4ade595ce68/info is initiating minor compaction (all files) 2024-11-16T11:35:49,796 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7550ed67a678c82a6324e4ade595ce68/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. 
2024-11-16T11:35:49,796 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/456575483ca5470ebc6957b4ebab6204, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/5bd25917cdaa4215baae2d6be983455e, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/a17d8a4238b54bf2b7fd1f0034ba9bf2] into tmpdir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp, totalSize=29.3 K 2024-11-16T11:35:49,797 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.Compactor(225): Compacting 456575483ca5470ebc6957b4ebab6204, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731756940377 2024-11-16T11:35:49,797 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5bd25917cdaa4215baae2d6be983455e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731756947935 2024-11-16T11:35:49,798 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.Compactor(225): Compacting a17d8a4238b54bf2b7fd1f0034ba9bf2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731756949356 2024-11-16T11:35:49,816 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7550ed67a678c82a6324e4ade595ce68#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:35:49,817 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/7844f4037cba4d8690f089989fed9944 is 1080, key is row0002/info:/1731756940377/Put/seqid=0 2024-11-16T11:35:49,818 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
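The three Compactor entries above list the selected HFiles at 17.6 K, 5.9 K and 5.9 K; their total is the 30048 bytes (about 29.3 K) reported by the exploring compaction policy, and the same figure appears a few lines earlier as the sumSize that ConstantSizeRegionSplitPolicy compared against sizeToCheck=16.0 K. A purely illustrative restatement of that arithmetic, not the HBase split-policy code itself:

public final class StoreSizeCheckSketch {
    // Reproduces the size arithmetic visible in the log; the byte count comes from the
    // "selected 3 files of size 30048" entry, not from HBase internals.
    public static void main(String[] args) {
        long totalSelectedBytes = 30048L;                // 17.6 K + 5.9 K + 5.9 K
        double sumSizeKb = totalSelectedBytes / 1024.0;  // ~29.3 K, as logged
        double sizeToCheckKb = 16.0;                     // split threshold from the log
        System.out.printf("sumSize=%.1f K, sizeToCheck=%.1f K, shouldSplit=%b%n",
            sumSizeKb, sizeToCheckKb, sumSizeKb > sizeToCheckKb);
    }
}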
2024-11-16T11:35:49,819 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 2024-11-16T11:35:49,819 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741882_1065 2024-11-16T11:35:49,819 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:35:49,820 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:49,820 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK], DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]) is bad. 2024-11-16T11:35:49,821 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741883_1066 2024-11-16T11:35:49,821 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42905,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK] 2024-11-16T11:35:49,822 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:49,822 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK], DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK]) is bad. 2024-11-16T11:35:49,822 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741884_1067 2024-11-16T11:35:49,823 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33925,DS-24b14bd7-d130-4510-a902-d7aee8c194a0,DISK] 2024-11-16T11:35:49,824 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:49,824 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]) is bad. 
2024-11-16T11:35:49,825 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741885_1068 2024-11-16T11:35:49,825 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK] 2024-11-16T11:35:49,826 WARN [IPC Server handler 0 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T11:35:49,826 WARN [IPC Server handler 0 on default port 39669 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T11:35:49,826 WARN [IPC Server handler 0 on default port 39669 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T11:35:49,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741886_1069 (size=18097) 2024-11-16T11:35:50,166 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3a7f417[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37083, datanodeUuid=0c7b5e84-7aa8-44ea-9459-a0502394f89f, infoPort=45383, infoSecurePort=0, ipcPort=36561, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741871_1054 to 127.0.0.1:42905 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T11:35:50,166 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@76daef56[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37083, datanodeUuid=0c7b5e84-7aa8-44ea-9459-a0502394f89f, infoPort=45383, infoSecurePort=0, ipcPort=36561, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741866_1049 to 127.0.0.1:42905 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:50,329 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:50,392 WARN [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-16T11:35:50,392 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
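The "All datanodes [...] are bad. Aborting..." and "Too many consecutive RollWriter requests" entries are what the WAL roller reports once every datanode that ever carried its pipeline has become unreachable. A rough sketch of how that situation can be provoked outside HBase, assuming only the hadoop-hdfs test artifact (MiniDFSCluster) on the classpath; this is not the HBaseTestingUtil machinery the test itself uses:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public final class DatanodeDeathSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
            cluster.waitActive();
            FileSystem fs = cluster.getFileSystem();
            try (FSDataOutputStream out = fs.create(new Path("/wal-like-file"))) {
                out.writeBytes("first edit\n");
                out.hflush();                                   // write pipeline is now open
                MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
                out.writeBytes("edit after datanode death\n");  // forces the pipeline
                out.hflush();                                   // recovery path logged above
                cluster.restartDataNode(dn);                    // bring the node back
            }
        } finally {
            cluster.shutdown();
        }
    }
}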
2024-11-16T11:35:50,399 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/7844f4037cba4d8690f089989fed9944 as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/7844f4037cba4d8690f089989fed9944 2024-11-16T11:35:50,407 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7550ed67a678c82a6324e4ade595ce68/info of 7550ed67a678c82a6324e4ade595ce68 into 7844f4037cba4d8690f089989fed9944(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T11:35:50,407 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7550ed67a678c82a6324e4ade595ce68: 2024-11-16T11:35:50,407 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68., storeName=7550ed67a678c82a6324e4ade595ce68/info, priority=13, startTime=1731756949794; duration=0sec 2024-11-16T11:35:50,407 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T11:35:50,407 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:35:50,407 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/7844f4037cba4d8690f089989fed9944 because midkey is the same as first or last row 2024-11-16T11:35:50,408 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T11:35:50,408 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:35:50,408 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/7844f4037cba4d8690f089989fed9944 because midkey is the same as first or last row 2024-11-16T11:35:50,408 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T11:35:50,408 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:35:50,408 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/7844f4037cba4d8690f089989fed9944 because midkey is the same as first or last row 2024-11-16T11:35:50,408 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:35:50,408 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7550ed67a678c82a6324e4ade595ce68:info 2024-11-16T11:35:50,582 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:35:50,587 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:35:50,588 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:35:50,588 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:35:50,588 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:35:50,588 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a952b35{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:35:50,588 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3edd2eb0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:35:50,692 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@dafb872{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/java.io.tmpdir/jetty-localhost-39295-hadoop-hdfs-3_4_1-tests_jar-_-any-15774982549153789781/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:35:50,693 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@74f0cad6{HTTP/1.1, (http/1.1)}{localhost:39295} 2024-11-16T11:35:50,693 INFO [Time-limited test {}] server.Server(415): Started @135915ms 2024-11-16T11:35:50,694 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:35:50,897 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:51,125 WARN [Thread-987 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T11:35:51,132 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2da1f82c9d28e035 with lease ID 0x839e6fff8380021e: from storage DS-590e89d7-197c-4113-851d-bcf056491e78 node DatanodeRegistration(127.0.0.1:39461, datanodeUuid=737f46b5-4f75-46d5-bb6d-e4740cb99e10, infoPort=36709, infoSecurePort=0, ipcPort=37847, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T11:35:51,133 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2da1f82c9d28e035 with lease ID 0x839e6fff8380021e: from storage DS-13ead2df-b1b4-4f4b-aa7b-9efef1ff3a5a node DatanodeRegistration(127.0.0.1:39461, datanodeUuid=737f46b5-4f75-46d5-bb6d-e4740cb99e10, infoPort=36709, infoSecurePort=0, ipcPort=37847, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:35:51,165 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@76daef56[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37083, datanodeUuid=0c7b5e84-7aa8-44ea-9459-a0502394f89f, infoPort=45383, infoSecurePort=0, ipcPort=36561, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741881_1064 to 127.0.0.1:33925 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:51,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741856_1039 (size=13591) 2024-11-16T11:35:52,330 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:52,393 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:52,897 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:53,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741886_1069 (size=18097) 2024-11-16T11:35:54,330 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:54,393 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:54,558 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T11:35:54,898 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:55,151 ERROR [FSHLog-0-hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData-prefix:a7948fca2832,42791,1731756924581 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:55,151 WARN [FSHLog-0-hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData-prefix:a7948fca2832,42791,1731756924581 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T11:35:55,151 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C42791%2C1731756924581:(num 1731756924917) roll requested 2024-11-16T11:35:55,152 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C42791%2C1731756924581.1731756955151 2024-11-16T11:35:55,163 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:55,163 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:55,163 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:55,163 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:55,163 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:35:55,164 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/WALs/a7948fca2832,42791,1731756924581/a7948fca2832%2C42791%2C1731756924581.1731756924917 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/WALs/a7948fca2832,42791,1731756924581/a7948fca2832%2C42791%2C1731756924581.1731756955151 2024-11-16T11:35:55,164 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:55,164 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:55,164 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/WALs/a7948fca2832,42791,1731756924581/a7948fca2832%2C42791%2C1731756924581.1731756924917 2024-11-16T11:35:55,165 WARN [IPC Server handler 2 on default port 39669 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/WALs/a7948fca2832,42791,1731756924581/a7948fca2832%2C42791%2C1731756924581.1731756924917 has not been closed. 
Lease recovery is in progress. RecoveryId = 1071 for block blk_1073741830_1006 2024-11-16T11:35:55,165 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/WALs/a7948fca2832,42791,1731756924581/a7948fca2832%2C42791%2C1731756924581.1731756924917 after 1ms 2024-11-16T11:35:55,170 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45383:45383),(127.0.0.1/127.0.0.1:36709:36709)] 2024-11-16T11:35:55,170 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/WALs/a7948fca2832,42791,1731756924581/a7948fca2832%2C42791%2C1731756924581.1731756924917 is not closed yet, will try archiving it next time 2024-11-16T11:35:56,331 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:56,394 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:58,331 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
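RecoverLeaseFSUtils above is retrying lease recovery on the old master WAL: attempt=0 fails after 1 ms because the file "has not been closed", and attempt=1, about four seconds later, fails as well. A simplified stand-in for that retry loop, assuming only the public DistributedFileSystem#recoverLease call rather than HBase's actual RecoverLeaseFSUtils implementation:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
    // Simplified stand-in for the "Failed to recover lease, attempt=N" loop above;
    // pause and attempt counts are placeholders, not HBase's actual backoff schedule.
    public static boolean recoverLease(DistributedFileSystem dfs, Path wal,
                                       int maxAttempts, long pauseMs) throws Exception {
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            // recoverLease() returns true once the NameNode has closed the file; until
            // then the "has not been closed ... Lease recovery is in progress" warning
            // above is the expected answer.
            if (dfs.recoverLease(wal)) {
                return true;
            }
            Thread.sleep(pauseMs); // the log shows roughly 4 s between attempts 0 and 1
        }
        return false;
    }
}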
2024-11-16T11:35:58,394 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:35:59,128 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@70ae0229[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39461, datanodeUuid=737f46b5-4f75-46d5-bb6d-e4740cb99e10, infoPort=36709, infoSecurePort=0, ipcPort=37847, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741835_1011 to 127.0.0.1:33925 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:35:59,129 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4cce6ff7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39461, datanodeUuid=737f46b5-4f75-46d5-bb6d-e4740cb99e10, infoPort=36709, infoSecurePort=0, ipcPort=37847, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741833_1009 to 127.0.0.1:33925 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
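The DataNode$DataTransfer warnings above show a datanode failing to push block replicas to the still-unreachable 127.0.0.1:33925, while the surrounding BLOCK* addStoredBlock entries show replicas landing on the newly registered datanode at 127.0.0.1:39461 instead. In a test this re-replication is normally just waited for; a small sketch assuming the DFSTestUtil helper from the hadoop-hdfs test jar:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;

public final class WaitReplicationSketch {
    // Blocks until every block of the given file reports the expected replica count,
    // which is what the addStoredBlock entries above are gradually working towards.
    public static void waitForReplicas(FileSystem fs, Path file) throws Exception {
        DFSTestUtil.waitReplication(fs, file, (short) 2); // replication=2, as in this log
    }
}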
2024-11-16T11:35:59,167 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/WALs/a7948fca2832,42791,1731756924581/a7948fca2832%2C42791%2C1731756924581.1731756924917 after 4002ms 2024-11-16T11:36:00,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:36:00,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:36:00,332 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:00,395 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:01,148 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@41258194 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-222813829-172.17.0.2-1731756922121:blk_1073741832_1008, datanode=DatanodeInfoWithStorage[127.0.0.1:43447,null,null]) java.net.ConnectException: Call From a7948fca2832/172.17.0.2 to localhost:44953 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-16T11:36:01,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741832_1019 (size=455) 2024-11-16T11:36:01,364 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756925419 to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/oldWALs/a7948fca2832%2C40843%2C1731756924773.1731756925419 2024-11-16T11:36:01,365 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756946360 to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/oldWALs/a7948fca2832%2C40843%2C1731756924773.1731756946360 2024-11-16T11:36:02,126 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@70ae0229[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39461, datanodeUuid=737f46b5-4f75-46d5-bb6d-e4740cb99e10, infoPort=36709, infoSecurePort=0, ipcPort=37847, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741832_1019 to 127.0.0.1:33925 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:02,126 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4cce6ff7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39461, datanodeUuid=737f46b5-4f75-46d5-bb6d-e4740cb99e10, infoPort=36709, infoSecurePort=0, ipcPort=37847, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741826_1002 to 127.0.0.1:33925 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:02,332 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:02,395 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
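[Illustrative note, not part of the captured log] The util.RecoverLeaseFSUtils entries above ("Failed to recover lease, attempt=1 on file=... after 4002ms") record the client repeatedly asking the NameNode to recover the lease on an old WAL file so it can be safely closed. A minimal sketch of that retry pattern using the public DistributedFileSystem.recoverLease API is below; the path, timeout and pause values are placeholders, and this is not the HBase RecoverLeaseFSUtils implementation itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryExample {
      // Repeatedly ask the NameNode to recover the lease on a WAL file until it
      // reports the file closed, or until the deadline passes.
      static boolean recoverLease(Configuration conf, Path wal, long timeoutMs, long pauseMs)
          throws Exception {
        FileSystem fs = wal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return true; // a local filesystem has no leases to recover
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        long deadline = System.currentTimeMillis() + timeoutMs;
        int attempt = 0;
        while (System.currentTimeMillis() < deadline) {
          // recoverLease returns true once the file is closed and the lease released.
          if (dfs.recoverLease(wal)) {
            return true;
          }
          attempt++;
          System.out.println("Failed to recover lease, attempt=" + attempt + " on file=" + wal);
          Thread.sleep(pauseMs);
        }
        return false;
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder path; in the log above the target is the old WAL under .../MasterData/WALs/...
        Path wal = new Path("hdfs://localhost:39669/user/jenkins/example-wal");
        System.out.println("recovered=" + recoverLease(conf, wal, 60_000L, 4_000L));
      }
    }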
2024-11-16T11:36:03,824 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C40843%2C1731756924773.1731756963824 2024-11-16T11:36:03,835 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:03,835 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:03,835 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:03,836 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:03,836 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:03,836 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756948377 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756963824 2024-11-16T11:36:03,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741876_1059 (size=12911) 2024-11-16T11:36:03,839 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45383:45383),(127.0.0.1/127.0.0.1:36709:36709)] 2024-11-16T11:36:03,839 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756948377 is not closed yet, will try archiving it next time 2024-11-16T11:36:03,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40843 {}] regionserver.HRegion(8855): Flush requested on 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:36:03,845 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7550ed67a678c82a6324e4ade595ce68 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T11:36:03,852 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/8547933530584bdb8f80296fb8db2713 is 1080, key is row0013/info:/1731756963841/Put/seqid=0 2024-11-16T11:36:03,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741889_1073 (size=8190) 2024-11-16T11:36:03,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741889_1073 (size=8190) 2024-11-16T11:36:03,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/8547933530584bdb8f80296fb8db2713 2024-11-16T11:36:03,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/8547933530584bdb8f80296fb8db2713 as 
hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/8547933530584bdb8f80296fb8db2713 2024-11-16T11:36:03,889 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/8547933530584bdb8f80296fb8db2713, entries=3, sequenceid=66, filesize=8.0 K 2024-11-16T11:36:03,890 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 7550ed67a678c82a6324e4ade595ce68 in 45ms, sequenceid=66, compaction requested=false 2024-11-16T11:36:03,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7550ed67a678c82a6324e4ade595ce68: 2024-11-16T11:36:03,891 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-16T11:36:03,891 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:36:03,891 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/7844f4037cba4d8690f089989fed9944 because midkey is the same as first or last row 2024-11-16T11:36:04,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40843 {}] regionserver.HRegion(8855): Flush requested on 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:36:04,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7550ed67a678c82a6324e4ade595ce68 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-16T11:36:04,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/110b17aa7b2c47bebef8f3bd4d981569 is 1080, key is row0015/info:/1731756963846/Put/seqid=0 2024-11-16T11:36:04,078 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
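[Illustrative note, not part of the captured log] The flush entries above show the memstore being written to a file under the region's .tmp directory and then "committed" by moving it into the info store directory ("Committing .../.tmp/info/... as .../info/..."). A minimal sketch of that write-to-temp-then-rename pattern on the Hadoop FileSystem API follows; the paths, file name, and payload are invented for illustration, and this is not HBase's HRegionFileSystem code.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommitExample {
      // Write the new file under .tmp first, then move it into the store directory
      // so readers never observe a partially written file.
      static Path commitFlush(FileSystem fs, Path tmpDir, Path storeDir, String fileName,
          byte[] payload) throws IOException {
        Path tmpFile = new Path(tmpDir, fileName);
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
          out.write(payload);
          out.hflush(); // push the bytes to the write pipeline before closing
        }
        fs.mkdirs(storeDir); // make sure the destination directory exists
        Path dst = new Path(storeDir, fileName);
        if (!fs.rename(tmpFile, dst)) {
          throw new IOException("Failed to commit " + tmpFile + " as " + dst);
        }
        return dst;
      }

      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf); // stand-in for the HDFS instance in the log
        Path region = new Path("/tmp/example-region");
        Path committed = commitFlush(fs, new Path(region, ".tmp"), new Path(region, "info"),
            "example-hfile", "row0013".getBytes(StandardCharsets.UTF_8));
        System.out.println("Committed " + committed);
      }
    }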
2024-11-16T11:36:04,079 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 2024-11-16T11:36:04,079 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741890_1074 2024-11-16T11:36:04,079 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:36:04,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741891_1075 (size=14660) 2024-11-16T11:36:04,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741891_1075 (size=14660) 2024-11-16T11:36:04,086 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/110b17aa7b2c47bebef8f3bd4d981569 2024-11-16T11:36:04,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/110b17aa7b2c47bebef8f3bd4d981569 as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/110b17aa7b2c47bebef8f3bd4d981569 2024-11-16T11:36:04,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/110b17aa7b2c47bebef8f3bd4d981569, entries=9, sequenceid=79, filesize=14.3 K 2024-11-16T11:36:04,102 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 7550ed67a678c82a6324e4ade595ce68 in 32ms, sequenceid=79, compaction requested=true 2024-11-16T11:36:04,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7550ed67a678c82a6324e4ade595ce68: 2024-11-16T11:36:04,103 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-11-16T11:36:04,103 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:36:04,103 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/7844f4037cba4d8690f089989fed9944 because midkey is the same as first or last row 2024-11-16T11:36:04,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
7550ed67a678c82a6324e4ade595ce68:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T11:36:04,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:36:04,103 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T11:36:04,104 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T11:36:04,104 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HStore(1541): 7550ed67a678c82a6324e4ade595ce68/info is initiating minor compaction (all files) 2024-11-16T11:36:04,104 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7550ed67a678c82a6324e4ade595ce68/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. 2024-11-16T11:36:04,105 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/7844f4037cba4d8690f089989fed9944, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/8547933530584bdb8f80296fb8db2713, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/110b17aa7b2c47bebef8f3bd4d981569] into tmpdir=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp, totalSize=40.0 K 2024-11-16T11:36:04,105 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7844f4037cba4d8690f089989fed9944, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731756940377 2024-11-16T11:36:04,106 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8547933530584bdb8f80296fb8db2713, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1731756950372 2024-11-16T11:36:04,106 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] compactions.Compactor(225): Compacting 110b17aa7b2c47bebef8f3bd4d981569, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731756963846 2024-11-16T11:36:04,122 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7550ed67a678c82a6324e4ade595ce68#info#compaction#27 average throughput is 11.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:36:04,123 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/10906c715bad486481d18e1513ced6d5 is 1080, key is row0002/info:/1731756940377/Put/seqid=0 2024-11-16T11:36:04,124 WARN [Thread-1043 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:04,125 WARN [Thread-1043 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 2024-11-16T11:36:04,125 WARN [Thread-1043 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741892_1076 2024-11-16T11:36:04,125 WARN [Thread-1043 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:36:04,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741893_1077 (size=28989) 2024-11-16T11:36:04,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741893_1077 (size=28989) 2024-11-16T11:36:04,137 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/10906c715bad486481d18e1513ced6d5 as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/10906c715bad486481d18e1513ced6d5 2024-11-16T11:36:04,146 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7550ed67a678c82a6324e4ade595ce68/info of 7550ed67a678c82a6324e4ade595ce68 into 10906c715bad486481d18e1513ced6d5(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
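[Illustrative note, not part of the captured log] The split-policy entries around the compaction above repeatedly report "Should split because region size is big enough sumSize=..., sizeToCheck=16.0 K" and then "cannot split ... because midkey is the same as first or last row". The sketch below shows only the shape of those two checks (total store size against a threshold, and a usable split point); the sizes and row keys are invented, and this is not HBase's ConstantSizeRegionSplitPolicy or StoreUtils code.

    import java.util.Arrays;
    import java.util.List;

    public class SplitCheckExample {
      // A store is a split candidate when its files together exceed the check size
      // and the proposed split point (midkey) differs from the first and last row.
      static boolean shouldSplit(List<Long> storeFileSizes, long sizeToCheckBytes,
          byte[] midKey, byte[] firstKey, byte[] lastKey) {
        long sumSize = storeFileSizes.stream().mapToLong(Long::longValue).sum();
        if (sumSize <= sizeToCheckBytes) {
          return false; // region not big enough yet
        }
        if (Arrays.equals(midKey, firstKey) || Arrays.equals(midKey, lastKey)) {
          return false; // "cannot split ... because midkey is the same as first or last row"
        }
        return true;
      }

      public static void main(String[] args) {
        // Sizes loosely based on the three store files selected for compaction above.
        List<Long> sizes = List.of(18_125L, 8_190L, 14_660L);
        byte[] mid = "row0013".getBytes();
        byte[] first = "row0002".getBytes();
        byte[] last = "row0023".getBytes();
        System.out.println("should split: " + shouldSplit(sizes, 16 * 1024, mid, first, last));
      }
    }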
2024-11-16T11:36:04,146 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7550ed67a678c82a6324e4ade595ce68: 2024-11-16T11:36:04,146 INFO [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68., storeName=7550ed67a678c82a6324e4ade595ce68/info, priority=13, startTime=1731756964103; duration=0sec 2024-11-16T11:36:04,146 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-16T11:36:04,146 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:36:04,146 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/10906c715bad486481d18e1513ced6d5 because midkey is the same as first or last row 2024-11-16T11:36:04,146 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-16T11:36:04,146 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:36:04,146 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/10906c715bad486481d18e1513ced6d5 because midkey is the same as first or last row 2024-11-16T11:36:04,146 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-16T11:36:04,146 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:36:04,146 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/10906c715bad486481d18e1513ced6d5 because midkey is the same as first or last row 2024-11-16T11:36:04,146 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:36:04,146 DEBUG [RS:0;a7948fca2832:40843-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7550ed67a678c82a6324e4ade595ce68:info 2024-11-16T11:36:04,239 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.1731756948377 to 
hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/oldWALs/a7948fca2832%2C40843%2C1731756924773.1731756948377 2024-11-16T11:36:04,332 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:04,395 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-16T11:36:04,395 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:04,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T11:36:04,476 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T11:36:04,476 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:36:04,476 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:36:04,476 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:36:04,476 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-16T11:36:04,477 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-16T11:36:04,477 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1352773276, stopped=false
2024-11-16T11:36:04,477 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a7948fca2832,42791,1731756924581
2024-11-16T11:36:04,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46401-0x101436d9a3c0002, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-16T11:36:04,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-16T11:36:04,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-16T11:36:04,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-16T11:36:04,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46401-0x101436d9a3c0002, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-16T11:36:04,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-16T11:36:04,524 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-16T11:36:04,525 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
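[Illustrative note, not part of the captured log] The ZKWatcher entries above show every process receiving a NodeDeleted event for /hbase/running: deleting that znode is how cluster shutdown is broadcast, and each watcher then re-registers a watch on the now-absent node (the "Set watcher on znode that does not yet exist" lines later in the log). A minimal sketch of watching a znode for deletion with the plain ZooKeeper client follows; the quorum string and znode path are taken from the log, everything else (session timeout, latch handling) is illustrative.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatcherExample {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        CountDownLatch shutdown = new CountDownLatch(1);
        // Quorum string as seen in the log; 30s session timeout is a placeholder.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:52242", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && "/hbase/running".equals(event.getPath())) {
            shutdown.countDown(); // cluster shutdown has been requested
          }
        });
        connected.await();
        // exists() registers the watch even if the znode is not there yet; note that
        // ZooKeeper watches are one-shot and must be re-set after they fire.
        zk.exists("/hbase/running", true);
        shutdown.await();
        zk.close();
      }
    }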
2024-11-16T11:36:04,525 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:36:04,525 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:36:04,525 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'a7948fca2832,40843,1731756924773' ***** 2024-11-16T11:36:04,525 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T11:36:04,525 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a7948fca2832,46401,1731756926229' ***** 2024-11-16T11:36:04,525 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T11:36:04,525 INFO [RS:0;a7948fca2832:40843 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T11:36:04,525 INFO [RS:1;a7948fca2832:46401 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T11:36:04,525 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T11:36:04,525 INFO [RS:0;a7948fca2832:40843 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T11:36:04,525 INFO [RS:1;a7948fca2832:46401 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T11:36:04,525 INFO [RS:1;a7948fca2832:46401 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T11:36:04,526 INFO [RS:0;a7948fca2832:40843 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T11:36:04,526 INFO [RS:1;a7948fca2832:46401 {}] regionserver.HRegionServer(959): stopping server a7948fca2832,46401,1731756926229 2024-11-16T11:36:04,526 INFO [RS:1;a7948fca2832:46401 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T11:36:04,526 INFO [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(3091): Received CLOSE for 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:36:04,526 INFO [RS:1;a7948fca2832:46401 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;a7948fca2832:46401. 
2024-11-16T11:36:04,526 DEBUG [RS:1;a7948fca2832:46401 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:36:04,526 DEBUG [RS:1;a7948fca2832:46401 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:36:04,526 INFO [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(959): stopping server a7948fca2832,40843,1731756924773 2024-11-16T11:36:04,526 INFO [RS:1;a7948fca2832:46401 {}] regionserver.HRegionServer(976): stopping server a7948fca2832,46401,1731756926229; all regions closed. 2024-11-16T11:36:04,526 INFO [RS:0;a7948fca2832:40843 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T11:36:04,526 INFO [RS:0;a7948fca2832:40843 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a7948fca2832:40843. 
2024-11-16T11:36:04,526 DEBUG [RS:0;a7948fca2832:40843 {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
  at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
  at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
  at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
  at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
  at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
  at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
  at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-16T11:36:04,526 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-16T11:36:04,526 DEBUG [RS:0;a7948fca2832:40843 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-16T11:36:04,526 INFO [RS:0;a7948fca2832:40843 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-16T11:36:04,526 INFO [RS:0;a7948fca2832:40843 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-16T11:36:04,526 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7550ed67a678c82a6324e4ade595ce68, disabling compactions & flushes
2024-11-16T11:36:04,526 INFO [RS:0;a7948fca2832:40843 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-16T11:36:04,526 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.
2024-11-16T11:36:04,526 INFO [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-16T11:36:04,527 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.
2024-11-16T11:36:04,527 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. after waiting 0 ms
2024-11-16T11:36:04,527 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.
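[Illustrative note, not part of the captured log] The HRegion entries above trace the close protocol for region 7550ed67a678c82a6324e4ade595ce68: mark it closing, disable compactions and flushes from other threads, wait for the close lock, disable further updates, and, in the entries that follow, flush whatever is still in the memstore before the region is marked closed. A minimal, generic sketch of that "drain writers, then flush, then close" pattern is below; the class and method names are invented and this is not HRegion's code.

    import java.util.concurrent.atomic.AtomicLong;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class ClosableRegionExample {
      private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
      private final AtomicLong memstoreSizeBytes = new AtomicLong();
      private volatile boolean closing = false;
      private volatile boolean closed = false;

      // Writers take the read lock, so many can proceed concurrently until close() begins.
      public void put(byte[] row, byte[] value) {
        closeLock.readLock().lock();
        try {
          if (closing || closed) {
            throw new IllegalStateException("region is closing");
          }
          memstoreSizeBytes.addAndGet(row.length + value.length); // stand-in for a memstore update
        } finally {
          closeLock.readLock().unlock();
        }
      }

      // Close: refuse new writes, wait for in-flight writes to drain, flush, mark closed.
      public void close() {
        closing = true;                // "Closing <region>, disabling compactions & flushes"
        closeLock.writeLock().lock();  // "Acquired close lock ... after waiting N ms"
        try {
          if (memstoreSizeBytes.get() > 0) { // "Flushing ... 1/1 column families"
            flushMemstore();
          }
          closed = true;               // updates disabled from here on
        } finally {
          closeLock.writeLock().unlock();
        }
      }

      private void flushMemstore() {
        // The real system writes an HFile under .tmp and commits it; here we just reset.
        memstoreSizeBytes.set(0);
      }
    }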
2024-11-16T11:36:04,527 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 7550ed67a678c82a6324e4ade595ce68 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T11:36:04,529 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:04,529 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:04,529 INFO [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T11:36:04,529 DEBUG [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 7550ed67a678c82a6324e4ade595ce68=TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.} 2024-11-16T11:36:04,529 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:04,529 DEBUG [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:36:04,529 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:04,529 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:04,530 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T11:36:04,530 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T11:36:04,530 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T11:36:04,530 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T11:36:04,530 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T11:36:04,530 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:04,530 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-16T11:36:04,530 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:04,530 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 2024-11-16T11:36:04,530 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46401-0x101436d9a3c0002, quorum=127.0.0.1:52242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:36:04,530 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:36:04,530 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:36:04,531 WARN [IPC Server handler 4 on default port 39669 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 has not been closed. Lease recovery is in progress. RecoveryId = 1078 for block blk_1073741837_1013 2024-11-16T11:36:04,531 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 after 1ms 2024-11-16T11:36:04,536 ERROR [FSHLog-0-hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b-prefix:a7948fca2832,40843,1731756924773.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T11:36:04,536 WARN [FSHLog-0-hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b-prefix:a7948fca2832,40843,1731756924773.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:04,536 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C40843%2C1731756924773.meta:.meta(num 1731756925986) roll requested 2024-11-16T11:36:04,537 INFO [regionserver/a7948fca2832:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C40843%2C1731756924773.meta.1731756964537.meta 2024-11-16T11:36:04,538 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/28e1900afaa940f784c6d57d56f1e2b9 is 1079, key is tmprow/info:/1731756964273/Put/seqid=0 2024-11-16T11:36:04,540 WARN [Thread-1049 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:04,540 WARN [Thread-1049 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 
2024-11-16T11:36:04,540 WARN [Thread-1049 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741894_1079 2024-11-16T11:36:04,541 WARN [Thread-1049 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:36:04,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741896_1081 (size=6027) 2024-11-16T11:36:04,574 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:04,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741896_1081 (size=6027) 2024-11-16T11:36:04,574 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:04,574 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:04,574 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:04,574 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:04,575 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756964537.meta 2024-11-16T11:36:04,575 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:04,575 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43447,DS-713c46c1-8c9c-4a94-9576-bd0b484304aa,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T11:36:04,575 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta 2024-11-16T11:36:04,575 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45383:45383),(127.0.0.1/127.0.0.1:36709:36709)] 2024-11-16T11:36:04,575 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta is not closed yet, will try archiving it next time 2024-11-16T11:36:04,576 WARN [IPC Server handler 4 on default port 39669 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta has not been closed. Lease recovery is in progress. RecoveryId = 1082 for block blk_1073741834_1010 2024-11-16T11:36:04,576 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta after 1ms 2024-11-16T11:36:04,596 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/.tmp/info/f1d786ba4646465d96bfec3cae49025a is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68./info:regioninfo/1731756926726/Put/seqid=0 2024-11-16T11:36:04,597 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:04,598 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 
2024-11-16T11:36:04,598 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741897_1083 2024-11-16T11:36:04,598 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:36:04,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741898_1084 (size=7089) 2024-11-16T11:36:04,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741898_1084 (size=7089) 2024-11-16T11:36:04,605 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/.tmp/info/f1d786ba4646465d96bfec3cae49025a 2024-11-16T11:36:04,626 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/.tmp/ns/d4ec9d7b00734408b15ae279d3183287 is 43, key is default/ns:d/1731756926086/Put/seqid=0 2024-11-16T11:36:04,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741899_1085 (size=5153) 2024-11-16T11:36:04,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741899_1085 (size=5153) 2024-11-16T11:36:04,730 DEBUG [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:36:04,930 DEBUG [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7550ed67a678c82a6324e4ade595ce68 2024-11-16T11:36:04,975 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=84 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/28e1900afaa940f784c6d57d56f1e2b9 2024-11-16T11:36:04,983 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/.tmp/info/28e1900afaa940f784c6d57d56f1e2b9 as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/28e1900afaa940f784c6d57d56f1e2b9 2024-11-16T11:36:04,990 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/28e1900afaa940f784c6d57d56f1e2b9, entries=1, sequenceid=84, filesize=5.9 K 2024-11-16T11:36:04,992 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7550ed67a678c82a6324e4ade595ce68 in 465ms, sequenceid=84, compaction requested=false 2024-11-16T11:36:04,992 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/aee356af88fc4f0db9771da8e8246aaa, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/96b07281870a40518483a92f9f0bd690, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/456575483ca5470ebc6957b4ebab6204, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/6cbf97a6ff4f4bc78d17edd1f94f4e42, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/5bd25917cdaa4215baae2d6be983455e, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/7844f4037cba4d8690f089989fed9944, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/a17d8a4238b54bf2b7fd1f0034ba9bf2, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/8547933530584bdb8f80296fb8db2713, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/110b17aa7b2c47bebef8f3bd4d981569] to archive 2024-11-16T11:36:04,994 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-16T11:36:04,995 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/aee356af88fc4f0db9771da8e8246aaa to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/aee356af88fc4f0db9771da8e8246aaa 2024-11-16T11:36:04,997 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/96b07281870a40518483a92f9f0bd690 to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/96b07281870a40518483a92f9f0bd690 2024-11-16T11:36:04,999 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/456575483ca5470ebc6957b4ebab6204 to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/456575483ca5470ebc6957b4ebab6204 2024-11-16T11:36:05,000 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/6cbf97a6ff4f4bc78d17edd1f94f4e42 to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/6cbf97a6ff4f4bc78d17edd1f94f4e42 2024-11-16T11:36:05,002 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/5bd25917cdaa4215baae2d6be983455e to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/5bd25917cdaa4215baae2d6be983455e 2024-11-16T11:36:05,003 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/7844f4037cba4d8690f089989fed9944 
to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/7844f4037cba4d8690f089989fed9944 2024-11-16T11:36:05,004 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/a17d8a4238b54bf2b7fd1f0034ba9bf2 to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/a17d8a4238b54bf2b7fd1f0034ba9bf2 2024-11-16T11:36:05,006 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/8547933530584bdb8f80296fb8db2713 to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/8547933530584bdb8f80296fb8db2713 2024-11-16T11:36:05,007 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/110b17aa7b2c47bebef8f3bd4d981569 to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/info/110b17aa7b2c47bebef8f3bd4d981569 2024-11-16T11:36:05,008 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=a7948fca2832:42791 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-16T11:36:05,008 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [aee356af88fc4f0db9771da8e8246aaa=10347, 96b07281870a40518483a92f9f0bd690=12506, 456575483ca5470ebc6957b4ebab6204=17994, 6cbf97a6ff4f4bc78d17edd1f94f4e42=6027, 5bd25917cdaa4215baae2d6be983455e=6027, 7844f4037cba4d8690f089989fed9944=18097, a17d8a4238b54bf2b7fd1f0034ba9bf2=6027, 8547933530584bdb8f80296fb8db2713=8190, 110b17aa7b2c47bebef8f3bd4d981569=14660] 2024-11-16T11:36:05,013 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7550ed67a678c82a6324e4ade595ce68/recovered.edits/87.seqid, newMaxSeqId=87, maxSeqId=1 2024-11-16T11:36:05,013 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. 2024-11-16T11:36:05,014 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7550ed67a678c82a6324e4ade595ce68: Waiting for close lock at 1731756964526Running coprocessor pre-close hooks at 1731756964526Disabling compacts and flushes for region at 1731756964526Disabling writes for close at 1731756964527 (+1 ms)Obtaining lock to block concurrent updates at 1731756964527Preparing flush snapshotting stores in 7550ed67a678c82a6324e4ade595ce68 at 1731756964527Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68., syncing WAL and waiting on mvcc, flushsize=dataSize=1075, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731756964527Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. at 1731756964528 (+1 ms)Flushing 7550ed67a678c82a6324e4ade595ce68/info: creating writer at 1731756964528Flushing 7550ed67a678c82a6324e4ade595ce68/info: appending metadata at 1731756964537 (+9 ms)Flushing 7550ed67a678c82a6324e4ade595ce68/info: closing flushed file at 1731756964537Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@409fe464: reopening flushed file at 1731756964982 (+445 ms)Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7550ed67a678c82a6324e4ade595ce68 in 465ms, sequenceid=84, compaction requested=false at 1731756964992 (+10 ms)Writing region close event to WAL at 1731756965009 (+17 ms)Running coprocessor post-close hooks at 1731756965013 (+4 ms)Closed at 1731756965013 2024-11-16T11:36:05,014 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731756926358.7550ed67a678c82a6324e4ade595ce68. 
2024-11-16T11:36:05,032 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/.tmp/ns/d4ec9d7b00734408b15ae279d3183287 2024-11-16T11:36:05,057 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/.tmp/table/84dec5c689e44269aab6bf7e16a53567 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731756926736/Put/seqid=0 2024-11-16T11:36:05,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741900_1086 (size=5424) 2024-11-16T11:36:05,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741900_1086 (size=5424) 2024-11-16T11:36:05,063 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/.tmp/table/84dec5c689e44269aab6bf7e16a53567 2024-11-16T11:36:05,071 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/.tmp/info/f1d786ba4646465d96bfec3cae49025a as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/info/f1d786ba4646465d96bfec3cae49025a 2024-11-16T11:36:05,078 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/info/f1d786ba4646465d96bfec3cae49025a, entries=10, sequenceid=11, filesize=6.9 K 2024-11-16T11:36:05,079 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/.tmp/ns/d4ec9d7b00734408b15ae279d3183287 as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/ns/d4ec9d7b00734408b15ae279d3183287 2024-11-16T11:36:05,086 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/ns/d4ec9d7b00734408b15ae279d3183287, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T11:36:05,087 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/.tmp/table/84dec5c689e44269aab6bf7e16a53567 as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/table/84dec5c689e44269aab6bf7e16a53567 
2024-11-16T11:36:05,094 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/table/84dec5c689e44269aab6bf7e16a53567, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T11:36:05,095 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 565ms, sequenceid=11, compaction requested=false 2024-11-16T11:36:05,100 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T11:36:05,101 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T11:36:05,101 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T11:36:05,101 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731756964530Running coprocessor pre-close hooks at 1731756964530Disabling compacts and flushes for region at 1731756964530Disabling writes for close at 1731756964530Obtaining lock to block concurrent updates at 1731756964530Preparing flush snapshotting stores in 1588230740 at 1731756964530Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731756964531 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731756964576 (+45 ms)Flushing 1588230740/info: creating writer at 1731756964576Flushing 1588230740/info: appending metadata at 1731756964595 (+19 ms)Flushing 1588230740/info: closing flushed file at 1731756964595Flushing 1588230740/ns: creating writer at 1731756964611 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731756964626 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731756964626Flushing 1588230740/table: creating writer at 1731756965040 (+414 ms)Flushing 1588230740/table: appending metadata at 1731756965056 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731756965056Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4413321d: reopening flushed file at 1731756965070 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45d30b83: reopening flushed file at 1731756965078 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@216bc532: reopening flushed file at 1731756965086 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 565ms, sequenceid=11, compaction requested=false at 1731756965096 (+10 ms)Writing region close event to WAL at 1731756965097 (+1 ms)Running coprocessor post-close hooks at 1731756965101 (+4 ms)Closed at 1731756965101 2024-11-16T11:36:05,101 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T11:36:05,130 INFO 
[RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(976): stopping server a7948fca2832,40843,1731756924773; all regions closed. 2024-11-16T11:36:05,130 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:05,131 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:05,131 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:05,131 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:05,131 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:05,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741895_1080 (size=825) 2024-11-16T11:36:05,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741895_1080 (size=825) 2024-11-16T11:36:05,167 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3a7f417[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37083, datanodeUuid=0c7b5e84-7aa8-44ea-9459-a0502394f89f, infoPort=45383, infoSecurePort=0, ipcPort=36561, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741876_1059 to 127.0.0.1:44795 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:05,268 INFO [regionserver/a7948fca2832:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T11:36:05,269 INFO [regionserver/a7948fca2832:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T11:36:05,273 INFO [regionserver/a7948fca2832:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T11:36:05,328 INFO [regionserver/a7948fca2832:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T11:36:05,328 INFO [regionserver/a7948fca2832:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T11:36:06,126 INFO [master/a7948fca2832:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T11:36:06,126 INFO [master/a7948fca2832:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-16T11:36:06,128 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4cce6ff7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39461, datanodeUuid=737f46b5-4f75-46d5-bb6d-e4740cb99e10, infoPort=36709, infoSecurePort=0, ipcPort=37847, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741825_1001 to 127.0.0.1:44795 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:06,128 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@70ae0229[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39461, datanodeUuid=737f46b5-4f75-46d5-bb6d-e4740cb99e10, infoPort=36709, infoSecurePort=0, ipcPort=37847, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741827_1003 to 127.0.0.1:44795 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:06,330 INFO [regionserver/a7948fca2832:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T11:36:07,128 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4cce6ff7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39461, datanodeUuid=737f46b5-4f75-46d5-bb6d-e4740cb99e10, infoPort=36709, infoSecurePort=0, ipcPort=37847, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741828_1004 to 127.0.0.1:44795 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:07,128 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@70ae0229[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39461, datanodeUuid=737f46b5-4f75-46d5-bb6d-e4740cb99e10, infoPort=36709, infoSecurePort=0, ipcPort=37847, storageInfo=lv=-57;cid=testClusterID;nsid=236562354;c=1731756922121):Failed to transfer BP-222813829-172.17.0.2-1731756922121:blk_1073741836_1012 to 127.0.0.1:44795 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T11:36:08,532 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 after 4002ms 2024-11-16T11:36:08,577 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta after 4002ms 2024-11-16T11:36:09,530 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-16T11:36:09,532 DEBUG [RS:1;a7948fca2832:46401 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/oldWALs 2024-11-16T11:36:09,532 INFO [RS:1;a7948fca2832:46401 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C46401%2C1731756926229:(num 1731756926462) 2024-11-16T11:36:09,532 DEBUG [RS:1;a7948fca2832:46401 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:36:09,532 INFO [RS:1;a7948fca2832:46401 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T11:36:09,532 INFO [RS:1;a7948fca2832:46401 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T11:36:09,533 INFO [RS:1;a7948fca2832:46401 {}] hbase.ChoreService(370): Chore service for: regionserver/a7948fca2832:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T11:36:09,533 INFO [RS:1;a7948fca2832:46401 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T11:36:09,533 INFO [RS:1;a7948fca2832:46401 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T11:36:09,533 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T11:36:09,533 INFO [RS:1;a7948fca2832:46401 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T11:36:09,533 INFO [RS:1;a7948fca2832:46401 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T11:36:09,533 INFO [RS:1;a7948fca2832:46401 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46401 2024-11-16T11:36:09,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:36:09,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46401-0x101436d9a3c0002, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7948fca2832,46401,1731756926229 2024-11-16T11:36:09,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:36:09,598 INFO [RS:1;a7948fca2832:46401 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T11:36:09,608 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7948fca2832,46401,1731756926229] 2024-11-16T11:36:09,618 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7948fca2832,46401,1731756926229 already deleted, retry=false 2024-11-16T11:36:09,619 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7948fca2832,46401,1731756926229 expired; onlineServers=1 2024-11-16T11:36:09,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46401-0x101436d9a3c0002, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:36:09,708 INFO [RS:1;a7948fca2832:46401 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T11:36:09,708 INFO [RS:1;a7948fca2832:46401 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7948fca2832,46401,1731756926229; zookeeper connection closed. 2024-11-16T11:36:09,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46401-0x101436d9a3c0002, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:36:09,709 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@64ba2492 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@64ba2492 2024-11-16T11:36:10,015 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,131 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-16T11:36:10,137 DEBUG [RS:0;a7948fca2832:40843 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/oldWALs 2024-11-16T11:36:10,137 INFO [RS:0;a7948fca2832:40843 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C40843%2C1731756924773.meta:.meta(num 1731756964537) 2024-11-16T11:36:10,137 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:10,138 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:10,138 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:10,138 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:10,138 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:10,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741888_1072 (size=18156) 2024-11-16T11:36:10,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741888_1072 (size=18156) 2024-11-16T11:36:10,144 DEBUG [RS:0;a7948fca2832:40843 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/oldWALs 2024-11-16T11:36:10,144 INFO [RS:0;a7948fca2832:40843 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C40843%2C1731756924773:(num 1731756963824) 2024-11-16T11:36:10,144 DEBUG [RS:0;a7948fca2832:40843 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:36:10,144 INFO [RS:0;a7948fca2832:40843 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T11:36:10,144 INFO [RS:0;a7948fca2832:40843 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T11:36:10,144 INFO [RS:0;a7948fca2832:40843 {}] hbase.ChoreService(370): Chore service for: regionserver/a7948fca2832:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T11:36:10,144 INFO [RS:0;a7948fca2832:40843 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T11:36:10,144 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T11:36:10,145 INFO [RS:0;a7948fca2832:40843 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40843 2024-11-16T11:36:10,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7948fca2832,40843,1731756924773 2024-11-16T11:36:10,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:36:10,198 INFO [RS:0;a7948fca2832:40843 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T11:36:10,208 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7948fca2832,40843,1731756924773] 2024-11-16T11:36:10,218 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7948fca2832,40843,1731756924773 already deleted, retry=false 2024-11-16T11:36:10,219 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7948fca2832,40843,1731756924773 expired; onlineServers=0 2024-11-16T11:36:10,219 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a7948fca2832,42791,1731756924581' ***** 2024-11-16T11:36:10,219 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T11:36:10,219 INFO [M:0;a7948fca2832:42791 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T11:36:10,219 INFO [M:0;a7948fca2832:42791 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T11:36:10,219 DEBUG [M:0;a7948fca2832:42791 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T11:36:10,219 DEBUG [M:0;a7948fca2832:42791 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T11:36:10,219 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T11:36:10,219 DEBUG [master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731756925151 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731756925151,5,FailOnTimeoutGroup] 2024-11-16T11:36:10,219 DEBUG [master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731756925151 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731756925151,5,FailOnTimeoutGroup] 2024-11-16T11:36:10,220 INFO [M:0;a7948fca2832:42791 {}] hbase.ChoreService(370): Chore service for: master/a7948fca2832:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T11:36:10,220 INFO [M:0;a7948fca2832:42791 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T11:36:10,220 DEBUG [M:0;a7948fca2832:42791 {}] master.HMaster(1795): Stopping service threads 2024-11-16T11:36:10,220 INFO [M:0;a7948fca2832:42791 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T11:36:10,220 INFO [M:0;a7948fca2832:42791 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T11:36:10,220 INFO [M:0;a7948fca2832:42791 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T11:36:10,220 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T11:36:10,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T11:36:10,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:10,229 DEBUG [M:0;a7948fca2832:42791 {}] zookeeper.ZKUtil(347): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T11:36:10,229 WARN [M:0;a7948fca2832:42791 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T11:36:10,230 INFO [M:0;a7948fca2832:42791 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/.lastflushedseqids 2024-11-16T11:36:10,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741901_1087 (size=130) 2024-11-16T11:36:10,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741901_1087 (size=130) 2024-11-16T11:36:10,239 INFO [M:0;a7948fca2832:42791 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T11:36:10,239 INFO [M:0;a7948fca2832:42791 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T11:36:10,240 DEBUG [M:0;a7948fca2832:42791 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T11:36:10,240 INFO [M:0;a7948fca2832:42791 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:36:10,240 DEBUG [M:0;a7948fca2832:42791 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:36:10,240 DEBUG [M:0;a7948fca2832:42791 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T11:36:10,240 DEBUG [M:0;a7948fca2832:42791 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:36:10,240 INFO [M:0;a7948fca2832:42791 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-16T11:36:10,257 DEBUG [M:0;a7948fca2832:42791 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc745975aebc400786b83916d056244a is 82, key is hbase:meta,,1/info:regioninfo/1731756926031/Put/seqid=0 2024-11-16T11:36:10,259 WARN [Thread-1088 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:10,259 WARN [Thread-1088 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK], DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 
2024-11-16T11:36:10,259 WARN [Thread-1088 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741902_1088 2024-11-16T11:36:10,260 WARN [Thread-1088 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:36:10,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741903_1089 (size=5672) 2024-11-16T11:36:10,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741903_1089 (size=5672) 2024-11-16T11:36:10,265 INFO [M:0;a7948fca2832:42791 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc745975aebc400786b83916d056244a 2024-11-16T11:36:10,286 DEBUG [M:0;a7948fca2832:42791 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/60dfee25a0ef4c60a1a0a13d966ac955 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731756926741/Put/seqid=0 2024-11-16T11:36:10,287 WARN [Thread-1095 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1090 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:10,288 WARN [Thread-1095 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741904_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK], DatanodeInfoWithStorage[127.0.0.1:39461,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 
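The DataStreamer warnings above show the client abandoning the block and excluding the unreachable datanode 127.0.0.1:44795 before retrying on the surviving nodes. A minimal sketch of the client-side HDFS settings that govern this behavior, assuming standard Hadoop configuration keys; the values shown are illustrative and this run's actual settings are not visible in the log:

    import org.apache.hadoop.conf.Configuration;

    public class PipelineFailurePolicySketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Whether the client tries to replace a failed pipeline datanode at all.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            // DEFAULT / ALWAYS / NEVER: when a replacement datanode is requested.
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            // Best-effort keeps the write going on the remaining datanodes instead of failing,
            // matching the "Excluding datanode ..." recovery seen in these entries.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        }
    }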
2024-11-16T11:36:10,288 WARN [Thread-1095 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741904_1090 2024-11-16T11:36:10,288 WARN [Thread-1095 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:36:10,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741905_1091 (size=6255) 2024-11-16T11:36:10,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741905_1091 (size=6255) 2024-11-16T11:36:10,293 INFO [M:0;a7948fca2832:42791 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/60dfee25a0ef4c60a1a0a13d966ac955 2024-11-16T11:36:10,298 INFO [M:0;a7948fca2832:42791 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 60dfee25a0ef4c60a1a0a13d966ac955 2024-11-16T11:36:10,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:36:10,308 INFO [RS:0;a7948fca2832:40843 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T11:36:10,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40843-0x101436d9a3c0001, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:36:10,308 INFO [RS:0;a7948fca2832:40843 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7948fca2832,40843,1731756924773; zookeeper connection closed. 2024-11-16T11:36:10,309 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@13b46c9f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@13b46c9f 2024-11-16T11:36:10,309 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-16T11:36:10,315 DEBUG [M:0;a7948fca2832:42791 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5774b8fd2dfc4b89bd6129bc9c18e76a is 69, key is a7948fca2832,40843,1731756924773/rs:state/1731756925238/Put/seqid=0 2024-11-16T11:36:10,317 WARN [Thread-1102 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741906_1092 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:10,317 WARN [Thread-1102 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741906_1092 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK], DatanodeInfoWithStorage[127.0.0.1:39461,DS-590e89d7-197c-4113-851d-bcf056491e78,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 2024-11-16T11:36:10,317 WARN [Thread-1102 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741906_1092 2024-11-16T11:36:10,317 WARN [Thread-1102 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:36:10,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741907_1093 (size=5224) 2024-11-16T11:36:10,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741907_1093 (size=5224) 2024-11-16T11:36:10,322 INFO [M:0;a7948fca2832:42791 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5774b8fd2dfc4b89bd6129bc9c18e76a 2024-11-16T11:36:10,341 DEBUG [M:0;a7948fca2832:42791 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2ab2e981d5154c429fe6f94734f01003 is 52, key is load_balancer_on/state:d/1731756926214/Put/seqid=0 2024-11-16T11:36:10,344 WARN [Thread-1108 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741908_1094 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44795 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T11:36:10,344 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1120756260_22 at /127.0.0.1:48526 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741908_1094] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data8]'}, localName='127.0.0.1:37083', datanodeUuid='0c7b5e84-7aa8-44ea-9459-a0502394f89f', xmitsInProgress=0}:Exception transferring block BP-222813829-172.17.0.2-1731756922121:blk_1073741908_1094 to mirror 127.0.0.1:44795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:10,344 WARN [Thread-1108 {}] hdfs.DataStreamer(1731): Error Recovery for BP-222813829-172.17.0.2-1731756922121:blk_1073741908_1094 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37083,DS-abad147b-6581-4451-9827-ec9aadfc1352,DISK], DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK]) is bad. 2024-11-16T11:36:10,344 WARN [Thread-1108 {}] hdfs.DataStreamer(1850): Abandoning BP-222813829-172.17.0.2-1731756922121:blk_1073741908_1094 2024-11-16T11:36:10,344 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1120756260_22 at /127.0.0.1:48526 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741908_1094] {}] datanode.BlockReceiver(316): Block 1073741908 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T11:36:10,344 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1120756260_22 at /127.0.0.1:48526 [Receiving block BP-222813829-172.17.0.2-1731756922121:blk_1073741908_1094] {}] datanode.DataXceiver(331): 127.0.0.1:37083:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48526 dst: /127.0.0.1:37083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:10,345 WARN [Thread-1108 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44795,DS-291144d9-44fd-4748-b55f-5ae5897bdb04,DISK] 2024-11-16T11:36:10,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741909_1095 (size=5056) 2024-11-16T11:36:10,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741909_1095 (size=5056) 2024-11-16T11:36:10,350 INFO [M:0;a7948fca2832:42791 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2ab2e981d5154c429fe6f94734f01003 2024-11-16T11:36:10,356 DEBUG [M:0;a7948fca2832:42791 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc745975aebc400786b83916d056244a as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc745975aebc400786b83916d056244a 2024-11-16T11:36:10,362 INFO [M:0;a7948fca2832:42791 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc745975aebc400786b83916d056244a, entries=8, sequenceid=60, filesize=5.5 K 2024-11-16T11:36:10,363 DEBUG [M:0;a7948fca2832:42791 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/60dfee25a0ef4c60a1a0a13d966ac955 as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/60dfee25a0ef4c60a1a0a13d966ac955 2024-11-16T11:36:10,369 INFO [M:0;a7948fca2832:42791 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 60dfee25a0ef4c60a1a0a13d966ac955 2024-11-16T11:36:10,369 INFO [M:0;a7948fca2832:42791 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/60dfee25a0ef4c60a1a0a13d966ac955, entries=6, sequenceid=60, filesize=6.1 K 2024-11-16T11:36:10,371 DEBUG [M:0;a7948fca2832:42791 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5774b8fd2dfc4b89bd6129bc9c18e76a as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5774b8fd2dfc4b89bd6129bc9c18e76a 2024-11-16T11:36:10,376 INFO [M:0;a7948fca2832:42791 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5774b8fd2dfc4b89bd6129bc9c18e76a, entries=2, sequenceid=60, filesize=5.1 K 2024-11-16T11:36:10,377 DEBUG [M:0;a7948fca2832:42791 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2ab2e981d5154c429fe6f94734f01003 as hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2ab2e981d5154c429fe6f94734f01003 2024-11-16T11:36:10,382 INFO [M:0;a7948fca2832:42791 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2ab2e981d5154c429fe6f94734f01003, entries=1, sequenceid=60, filesize=4.9 K 2024-11-16T11:36:10,383 INFO [M:0;a7948fca2832:42791 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=60, compaction requested=false 2024-11-16T11:36:10,384 INFO [M:0;a7948fca2832:42791 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:36:10,384 DEBUG [M:0;a7948fca2832:42791 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731756970240Disabling compacts and flushes for region at 1731756970240Disabling writes for close at 1731756970240Obtaining lock to block concurrent updates at 1731756970240Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731756970240Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731756970240Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731756970241 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731756970241Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731756970257 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731756970257Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731756970270 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731756970285 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731756970285Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731756970298 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731756970314 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731756970315 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731756970327 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731756970341 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731756970341Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a867ff6: reopening flushed file at 1731756970355 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7897c379: reopening flushed file at 1731756970362 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@724b7bfd: reopening flushed file at 1731756970370 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9e20515: reopening flushed file at 1731756970376 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=60, compaction requested=false at 1731756970383 (+7 ms)Writing region close event to WAL at 1731756970384 (+1 ms)Closed at 1731756970384 2024-11-16T11:36:10,385 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:10,385 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:10,385 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:10,385 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:10,385 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:10,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39461 is added to blk_1073741887_1070 (size=1045) 2024-11-16T11:36:10,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37083 is added to blk_1073741887_1070 (size=1045) 2024-11-16T11:36:10,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:10,552 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T11:36:10,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:10,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:10,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:11,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T11:36:11,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T11:36:11,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T11:36:11,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T11:36:11,152 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@34afb2e0 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-222813829-172.17.0.2-1731756922121:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:43447,null,null]) java.net.ConnectException: Call From a7948fca2832/172.17.0.2 to localhost:44953 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-16T11:36:11,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:36:11,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:12,177 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/WALs/a7948fca2832,42791,1731756924581/a7948fca2832%2C42791%2C1731756924581.1731756924917 to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/oldWALs/a7948fca2832%2C42791%2C1731756924581.1731756924917 2024-11-16T11:36:12,180 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/MasterData/oldWALs/a7948fca2832%2C42791%2C1731756924581.1731756924917 to hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/oldWALs/a7948fca2832%2C42791%2C1731756924581.1731756924917$masterlocalwal$ 2024-11-16T11:36:12,180 INFO [M:0;a7948fca2832:42791 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T11:36:12,180 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
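The repeated RecoverLeaseFSUtils failures above all bottom out in "java.io.IOException: Filesystem closed": the DFSClient behind the WAL has already been shut down, so the isFileClosed probe can never succeed. A minimal sketch of the underlying HDFS calls that utility retries, assuming the DistributedFileSystem API; the WAL path is a hypothetical placeholder (the real paths appear in the entries above) and the namenode URI is the one the log shows:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
        public static void main(String[] args) throws Exception {
            Path wal = new Path("/hbase/WALs/example-wal"); // hypothetical path for illustration
            Configuration conf = new Configuration();
            try (DistributedFileSystem dfs =
                     (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:39669"), conf)) {
                // recoverLease() returns true once the NameNode has closed the file;
                // isFileClosed() is the probe seen in the stack traces above.
                boolean closed = dfs.recoverLease(wal) || dfs.isFileClosed(wal);
                System.out.println("WAL closed: " + closed);
            }
        }
    }

In this run the probe fails only because the filesystem handle is already closed during teardown, not because lease recovery itself is broken.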
2024-11-16T11:36:12,180 INFO [M:0;a7948fca2832:42791 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42791 2024-11-16T11:36:12,180 INFO [M:0;a7948fca2832:42791 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T11:36:12,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:36:12,340 INFO [M:0;a7948fca2832:42791 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T11:36:12,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42791-0x101436d9a3c0000, quorum=127.0.0.1:52242, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:36:12,375 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@dafb872{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:12,376 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74f0cad6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:36:12,376 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:36:12,376 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3edd2eb0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:36:12,376 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a952b35{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir/,STOPPED} 2024-11-16T11:36:12,377 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T11:36:12,377 WARN [BP-222813829-172.17.0.2-1731756922121 heartbeating to localhost/127.0.0.1:39669 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:36:12,378 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:36:12,378 WARN [BP-222813829-172.17.0.2-1731756922121 heartbeating to localhost/127.0.0.1:39669 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-222813829-172.17.0.2-1731756922121 (Datanode Uuid 737f46b5-4f75-46d5-bb6d-e4740cb99e10) service to localhost/127.0.0.1:39669 2024-11-16T11:36:12,377 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@210a7edb {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-222813829-172.17.0.2-1731756922121:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:43447,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:44953 , LocalHost:localPort a7948fca2832/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-16T11:36:12,378 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@210a7edb {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-222813829-172.17.0.2-1731756922121:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:39461,null,null]) java.io.IOException: No block pool offer service for bpid=BP-222813829-172.17.0.2-1731756922121 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:12,378 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@210a7edb {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-222813829-172.17.0.2-1731756922121:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:43447,null,null], DatanodeInfoWithStorage[127.0.0.1:39461,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-222813829-172.17.0.2-1731756922121:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:43447,null,null], DatanodeInfoWithStorage[127.0.0.1:39461,null,null]] 2024-11-16T11:36:12,378 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@210a7edb {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-222813829-172.17.0.2-1731756922121:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:43447,null,null]) java.io.IOException: No block pool offer service for bpid=BP-222813829-172.17.0.2-1731756922121 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T11:36:12,378 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data3/current/BP-222813829-172.17.0.2-1731756922121 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:12,378 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@210a7edb {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-222813829-172.17.0.2-1731756922121:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:39461,null,null]) java.io.IOException: No block pool offer service for bpid=BP-222813829-172.17.0.2-1731756922121 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:12,378 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@210a7edb {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-222813829-172.17.0.2-1731756922121:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:43447,null,null], DatanodeInfoWithStorage[127.0.0.1:39461,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-222813829-172.17.0.2-1731756922121:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:43447,null,null], DatanodeInfoWithStorage[127.0.0.1:39461,null,null]] 2024-11-16T11:36:12,379 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data4/current/BP-222813829-172.17.0.2-1731756922121 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:12,379 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:36:12,381 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b31c6d1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:12,382 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2da63af0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:36:12,382 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:36:12,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c5281d3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:36:12,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@766d120{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir/,STOPPED} 2024-11-16T11:36:12,384 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T11:36:12,384 WARN [BP-222813829-172.17.0.2-1731756922121 heartbeating to localhost/127.0.0.1:39669 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:36:12,384 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:36:12,384 WARN [BP-222813829-172.17.0.2-1731756922121 heartbeating to localhost/127.0.0.1:39669 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-222813829-172.17.0.2-1731756922121 (Datanode Uuid 0c7b5e84-7aa8-44ea-9459-a0502394f89f) service to localhost/127.0.0.1:39669 2024-11-16T11:36:12,385 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data7/current/BP-222813829-172.17.0.2-1731756922121 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:12,385 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/cluster_65c0c11a-a2d1-3254-0eef-eb065fcc2a59/data/data8/current/BP-222813829-172.17.0.2-1731756922121 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:12,385 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:36:12,391 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b1223a8{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T11:36:12,391 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b8a0b06{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:36:12,391 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:36:12,391 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@82e7b75{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:36:12,391 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ed0b53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir/,STOPPED} 2024-11-16T11:36:12,399 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T11:36:12,432 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T11:36:12,439 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath 
Thread=154 (was 78) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39669 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fd1b8befdc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:39669 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:39669 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:39669 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39669 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:39669 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fd1b8befdc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:39669 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:39931 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39669 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:39669 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39931 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39669 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39669 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=239 (was 215) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4006 (was 4968) 2024-11-16T11:36:12,446 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=239, ProcessCount=11, AvailableMemoryMB=4005 2024-11-16T11:36:12,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T11:36:12,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.log.dir so I do NOT create it in target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8 2024-11-16T11:36:12,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/987d1844-857e-fb23-a6fd-139d0983e326/hadoop.tmp.dir so I do NOT create it in target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8 2024-11-16T11:36:12,446 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa, deleteOnExit=true 2024-11-16T11:36:12,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T11:36:12,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/test.cache.data 
in system properties and HBase conf 2024-11-16T11:36:12,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T11:36:12,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir in system properties and HBase conf 2024-11-16T11:36:12,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T11:36:12,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T11:36:12,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T11:36:12,447 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-16T11:36:12,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T11:36:12,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T11:36:12,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T11:36:12,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T11:36:12,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T11:36:12,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 
2024-11-16T11:36:12,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T11:36:12,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T11:36:12,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T11:36:12,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/nfs.dump.dir in system properties and HBase conf 2024-11-16T11:36:12,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/java.io.tmpdir in system properties and HBase conf 2024-11-16T11:36:12,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T11:36:12,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T11:36:12,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T11:36:12,462 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T11:36:12,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:12,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:12,841 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:36:12,847 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:36:12,848 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:36:12,848 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:36:12,848 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:36:12,849 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:36:12,852 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c9126e8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:36:12,853 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ca621c9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:36:12,959 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5910f3cc{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/java.io.tmpdir/jetty-localhost-38857-hadoop-hdfs-3_4_1-tests_jar-_-any-9421839567129302478/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T11:36:12,960 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@656d64e4{HTTP/1.1, (http/1.1)}{localhost:38857} 2024-11-16T11:36:12,960 INFO [Time-limited test {}] server.Server(415): Started @158182ms 2024-11-16T11:36:12,973 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T11:36:13,218 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:36:13,222 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:36:13,224 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:36:13,224 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:36:13,224 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:36:13,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b64e74f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:36:13,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b64032{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:36:13,335 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1fa19949{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/java.io.tmpdir/jetty-localhost-45945-hadoop-hdfs-3_4_1-tests_jar-_-any-1923081326810826075/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:13,336 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b3d84a8{HTTP/1.1, (http/1.1)}{localhost:45945} 2024-11-16T11:36:13,336 INFO [Time-limited test {}] server.Server(415): Started @158558ms 2024-11-16T11:36:13,337 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:36:13,366 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:36:13,370 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:36:13,371 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:36:13,371 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:36:13,371 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T11:36:13,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19f9ad22{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:36:13,372 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26d3a1b0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:36:13,504 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7dc1dfbb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/java.io.tmpdir/jetty-localhost-36001-hadoop-hdfs-3_4_1-tests_jar-_-any-12556370869797107633/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:13,504 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@62961604{HTTP/1.1, (http/1.1)}{localhost:36001} 2024-11-16T11:36:13,504 INFO [Time-limited test {}] server.Server(415): Started @158726ms 2024-11-16T11:36:13,505 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:36:13,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:13,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:36:14,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:14,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:14,654 WARN [Thread-1205 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data1/current/BP-2106001367-172.17.0.2-1731756972475/current, will proceed with Du for space computation calculation, 2024-11-16T11:36:14,654 WARN [Thread-1206 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data2/current/BP-2106001367-172.17.0.2-1731756972475/current, will proceed with Du for space computation calculation, 2024-11-16T11:36:14,680 WARN [Thread-1169 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:36:14,682 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7c4124fb0528970a with lease ID 0x1ed0d26b7431bf05: Processing first storage report for DS-875e16d7-efc4-4fe3-9515-81888e8dff4c from datanode DatanodeRegistration(127.0.0.1:38263, datanodeUuid=287914b4-629f-429f-b417-8fb75fb89957, infoPort=35765, infoSecurePort=0, ipcPort=45435, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475) 2024-11-16T11:36:14,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c4124fb0528970a with lease ID 0x1ed0d26b7431bf05: from storage DS-875e16d7-efc4-4fe3-9515-81888e8dff4c node DatanodeRegistration(127.0.0.1:38263, datanodeUuid=287914b4-629f-429f-b417-8fb75fb89957, infoPort=35765, infoSecurePort=0, ipcPort=45435, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:36:14,682 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7c4124fb0528970a with lease ID 0x1ed0d26b7431bf05: Processing first storage report for DS-abba11ed-9fc2-4052-af92-266d45f46376 from datanode DatanodeRegistration(127.0.0.1:38263, datanodeUuid=287914b4-629f-429f-b417-8fb75fb89957, infoPort=35765, infoSecurePort=0, ipcPort=45435, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475) 2024-11-16T11:36:14,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c4124fb0528970a with lease ID 0x1ed0d26b7431bf05: from storage DS-abba11ed-9fc2-4052-af92-266d45f46376 node DatanodeRegistration(127.0.0.1:38263, datanodeUuid=287914b4-629f-429f-b417-8fb75fb89957, infoPort=35765, infoSecurePort=0, ipcPort=45435, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:36:14,756 WARN [Thread-1217 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data4/current/BP-2106001367-172.17.0.2-1731756972475/current, will proceed with Du for space computation calculation, 2024-11-16T11:36:14,756 WARN [Thread-1216 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data3/current/BP-2106001367-172.17.0.2-1731756972475/current, will proceed with Du for space computation calculation, 2024-11-16T11:36:14,774 WARN [Thread-1192 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:36:14,777 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c2b512edbb53482 with lease ID 0x1ed0d26b7431bf06: Processing first storage report for DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388 from datanode DatanodeRegistration(127.0.0.1:35527, datanodeUuid=9b5b90c9-dcac-408d-8e33-6dedbaa4afd8, infoPort=41413, infoSecurePort=0, ipcPort=36527, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475) 2024-11-16T11:36:14,777 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c2b512edbb53482 with lease ID 0x1ed0d26b7431bf06: from storage DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388 node DatanodeRegistration(127.0.0.1:35527, datanodeUuid=9b5b90c9-dcac-408d-8e33-6dedbaa4afd8, infoPort=41413, infoSecurePort=0, ipcPort=36527, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:36:14,777 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c2b512edbb53482 with lease ID 0x1ed0d26b7431bf06: Processing first storage report for DS-b2c6e4ba-8f08-495d-b866-63612b3cdca9 from datanode DatanodeRegistration(127.0.0.1:35527, datanodeUuid=9b5b90c9-dcac-408d-8e33-6dedbaa4afd8, infoPort=41413, infoSecurePort=0, ipcPort=36527, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475) 2024-11-16T11:36:14,777 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c2b512edbb53482 with lease ID 0x1ed0d26b7431bf06: from storage DS-b2c6e4ba-8f08-495d-b866-63612b3cdca9 node DatanodeRegistration(127.0.0.1:35527, datanodeUuid=9b5b90c9-dcac-408d-8e33-6dedbaa4afd8, infoPort=41413, infoSecurePort=0, ipcPort=36527, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T11:36:14,839 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8 2024-11-16T11:36:14,842 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/zookeeper_0, clientPort=62036, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T11:36:14,843 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62036 2024-11-16T11:36:14,843 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:14,845 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:14,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38263 is added to blk_1073741825_1001 (size=7) 2024-11-16T11:36:14,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741825_1001 (size=7) 2024-11-16T11:36:14,857 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545 with version=8 2024-11-16T11:36:14,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/hbase-staging 2024-11-16T11:36:14,859 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:36:14,860 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:36:14,860 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:36:14,860 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:36:14,860 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:36:14,860 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:36:14,860 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T11:36:14,860 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:36:14,864 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32837 2024-11-16T11:36:14,866 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32837 connecting to ZooKeeper ensemble=127.0.0.1:62036 2024-11-16T11:36:14,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:328370x0, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T11:36:14,918 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32837-0x101436e5ea50000 connected 2024-11-16T11:36:14,998 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:15,000 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:15,003 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:36:15,003 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545, hbase.cluster.distributed=false 2024-11-16T11:36:15,005 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:36:15,006 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32837 2024-11-16T11:36:15,006 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32837 2024-11-16T11:36:15,007 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32837 2024-11-16T11:36:15,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32837 2024-11-16T11:36:15,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32837 2024-11-16T11:36:15,024 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:36:15,024 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:36:15,024 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:36:15,025 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:36:15,025 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:36:15,025 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:36:15,025 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T11:36:15,025 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:36:15,026 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38855 2024-11-16T11:36:15,027 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38855 connecting to ZooKeeper ensemble=127.0.0.1:62036 2024-11-16T11:36:15,028 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:15,030 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:15,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:388550x0, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T11:36:15,046 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:388550x0, quorum=127.0.0.1:62036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:36:15,046 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38855-0x101436e5ea50001 connected 2024-11-16T11:36:15,046 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T11:36:15,047 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T11:36:15,048 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T11:36:15,049 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:36:15,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38855 2024-11-16T11:36:15,052 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38855 2024-11-16T11:36:15,058 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38855 2024-11-16T11:36:15,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38855 2024-11-16T11:36:15,065 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38855 2024-11-16T11:36:15,077 DEBUG [M:0;a7948fca2832:32837 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a7948fca2832:32837 2024-11-16T11:36:15,077 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a7948fca2832,32837,1731756974859 2024-11-16T11:36:15,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:36:15,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:36:15,088 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/a7948fca2832,32837,1731756974859 2024-11-16T11:36:15,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:15,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T11:36:15,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:15,099 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T11:36:15,099 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a7948fca2832,32837,1731756974859 from backup master directory 2024-11-16T11:36:15,109 WARN [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T11:36:15,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a7948fca2832,32837,1731756974859 2024-11-16T11:36:15,109 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a7948fca2832,32837,1731756974859 2024-11-16T11:36:15,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:36:15,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:36:15,113 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/hbase.id] with ID: 81692352-ccc6-4367-8048-a10c7f0d370d 2024-11-16T11:36:15,113 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/.tmp/hbase.id 2024-11-16T11:36:15,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38263 is added to blk_1073741826_1002 (size=42) 2024-11-16T11:36:15,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741826_1002 (size=42) 2024-11-16T11:36:15,119 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location 
[hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/.tmp/hbase.id]:[hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/hbase.id] 2024-11-16T11:36:15,132 INFO [master/a7948fca2832:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:15,132 INFO [master/a7948fca2832:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T11:36:15,133 INFO [master/a7948fca2832:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-16T11:36:15,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:15,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:15,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38263 is added to blk_1073741827_1003 (size=196) 2024-11-16T11:36:15,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741827_1003 (size=196) 2024-11-16T11:36:15,152 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T11:36:15,153 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T11:36:15,154 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:36:15,161 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741828_1004 (size=1189) 2024-11-16T11:36:15,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38263 is added to blk_1073741828_1004 (size=1189) 2024-11-16T11:36:15,162 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store 2024-11-16T11:36:15,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38263 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:36:15,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:36:15,171 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:36:15,171 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T11:36:15,171 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:36:15,171 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:36:15,171 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-16T11:36:15,171 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:36:15,171 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:36:15,171 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731756975171Disabling compacts and flushes for region at 1731756975171Disabling writes for close at 1731756975171Writing region close event to WAL at 1731756975171Closed at 1731756975171 2024-11-16T11:36:15,172 WARN [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/.initializing 2024-11-16T11:36:15,172 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/WALs/a7948fca2832,32837,1731756974859 2024-11-16T11:36:15,175 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C32837%2C1731756974859, suffix=, logDir=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/WALs/a7948fca2832,32837,1731756974859, archiveDir=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/oldWALs, maxLogs=10 2024-11-16T11:36:15,176 INFO [master/a7948fca2832:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C32837%2C1731756974859.1731756975175 2024-11-16T11:36:15,181 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/WALs/a7948fca2832,32837,1731756974859/a7948fca2832%2C32837%2C1731756974859.1731756975175 2024-11-16T11:36:15,182 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35765:35765),(127.0.0.1/127.0.0.1:41413:41413)] 2024-11-16T11:36:15,183 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:36:15,183 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:36:15,183 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:15,183 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:15,184 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for 
column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:15,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T11:36:15,186 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:15,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:15,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:15,188 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T11:36:15,188 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:15,188 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:36:15,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:15,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T11:36:15,190 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:15,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:36:15,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:15,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T11:36:15,193 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:15,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:36:15,193 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:15,194 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:15,194 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:15,196 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:15,196 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:15,196 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T11:36:15,197 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:15,199 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:36:15,200 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847734, jitterRate=0.07795007526874542}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T11:36:15,200 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731756975183Initializing all the Stores at 1731756975184 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756975184Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756975184Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756975184Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756975184Cleaning up temporary data from old regions at 1731756975196 (+12 ms)Region opened successfully at 1731756975200 (+4 ms) 2024-11-16T11:36:15,201 INFO [master/a7948fca2832:0:becomeActiveMaster {}] 
region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T11:36:15,204 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@753a3cc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:36:15,205 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T11:36:15,205 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T11:36:15,205 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T11:36:15,205 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T11:36:15,206 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T11:36:15,206 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T11:36:15,207 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T11:36:15,209 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
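The recurring Close-WAL-Writer-0 warnings earlier in this section (util.RecoverLeaseFSUtils(258): Failed invocation ..., caused by java.io.IOException: Filesystem closed) show RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection while the mini cluster's DFSClient has already been shut down, so the IOException surfaces wrapped in an InvocationTargetException. Below is a minimal, editorial sketch of that reflective probe pattern; it is not the HBase source, and the class name, method name, and fallback behaviour in it are assumptions made for illustration only.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Minimal editorial sketch (not the HBase source): the reflective isFileClosed probe pattern
    // implied by the RecoverLeaseFSUtils stack traces earlier in this section.
    final class IsFileClosedProbe {
      /** Returns true if the filesystem reports the file as closed; false if the probe fails. */
      static boolean probeFileClosed(FileSystem fs, Path path) {
        try {
          // DistributedFileSystem.isFileClosed(Path) is resolved reflectively so the caller does
          // not need a compile-time dependency on the HDFS-specific FileSystem subclass.
          Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) isFileClosed.invoke(fs, path);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false; // this FileSystem implementation does not expose isFileClosed
        } catch (InvocationTargetException e) {
          // A DFSClient that is already shut down surfaces here as IOException("Filesystem
          // closed"), which is the cause recorded by the recurring WARN entries above.
          return false;
        }
      }
    }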
2024-11-16T11:36:15,210 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T11:36:15,218 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T11:36:15,219 INFO [master/a7948fca2832:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T11:36:15,219 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T11:36:15,229 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T11:36:15,229 INFO [master/a7948fca2832:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T11:36:15,230 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T11:36:15,239 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T11:36:15,241 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T11:36:15,250 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T11:36:15,252 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T11:36:15,260 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T11:36:15,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T11:36:15,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T11:36:15,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:15,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-16T11:36:15,271 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a7948fca2832,32837,1731756974859, sessionid=0x101436e5ea50000, setting cluster-up flag (Was=false) 2024-11-16T11:36:15,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:15,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:15,324 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T11:36:15,325 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7948fca2832,32837,1731756974859 2024-11-16T11:36:15,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:15,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:15,376 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T11:36:15,377 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7948fca2832,32837,1731756974859 2024-11-16T11:36:15,378 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T11:36:15,380 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T11:36:15,380 INFO [master/a7948fca2832:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T11:36:15,380 INFO [master/a7948fca2832:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-16T11:36:15,381 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a7948fca2832,32837,1731756974859 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T11:36:15,382 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:36:15,382 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:36:15,382 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:36:15,382 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:36:15,382 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a7948fca2832:0, corePoolSize=10, maxPoolSize=10 2024-11-16T11:36:15,382 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:15,382 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:36:15,382 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:15,383 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731757005383 2024-11-16T11:36:15,383 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T11:36:15,383 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T11:36:15,384 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T11:36:15,384 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T11:36:15,384 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T11:36:15,384 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T11:36:15,384 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:15,384 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T11:36:15,384 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:36:15,384 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T11:36:15,384 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T11:36:15,384 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T11:36:15,385 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T11:36:15,385 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T11:36:15,385 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731756975385,5,FailOnTimeoutGroup] 2024-11-16T11:36:15,385 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731756975385,5,FailOnTimeoutGroup] 2024-11-16T11:36:15,385 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:15,385 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T11:36:15,385 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:15,385 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-16T11:36:15,386 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:15,386 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T11:36:15,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:36:15,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38263 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:36:15,396 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T11:36:15,396 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545 2024-11-16T11:36:15,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38263 is added to blk_1073741832_1008 (size=32) 2024-11-16T11:36:15,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741832_1008 (size=32) 2024-11-16T11:36:15,467 INFO [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(746): ClusterId : 81692352-ccc6-4367-8048-a10c7f0d370d 2024-11-16T11:36:15,467 DEBUG [RS:0;a7948fca2832:38855 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T11:36:15,478 DEBUG [RS:0;a7948fca2832:38855 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T11:36:15,478 DEBUG [RS:0;a7948fca2832:38855 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T11:36:15,488 DEBUG [RS:0;a7948fca2832:38855 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T11:36:15,489 DEBUG [RS:0;a7948fca2832:38855 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63126704, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:36:15,509 DEBUG [RS:0;a7948fca2832:38855 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a7948fca2832:38855 2024-11-16T11:36:15,509 INFO [RS:0;a7948fca2832:38855 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T11:36:15,510 INFO [RS:0;a7948fca2832:38855 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T11:36:15,510 DEBUG [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T11:36:15,510 INFO [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7948fca2832,32837,1731756974859 with port=38855, startcode=1731756975024 2024-11-16T11:36:15,511 DEBUG [RS:0;a7948fca2832:38855 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T11:36:15,513 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49227, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T11:36:15,513 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32837 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7948fca2832,38855,1731756975024 2024-11-16T11:36:15,513 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32837 {}] master.ServerManager(517): Registering regionserver=a7948fca2832,38855,1731756975024 2024-11-16T11:36:15,515 DEBUG [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545 2024-11-16T11:36:15,515 DEBUG [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45425 2024-11-16T11:36:15,515 DEBUG [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T11:36:15,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:36:15,524 DEBUG [RS:0;a7948fca2832:38855 {}] zookeeper.ZKUtil(111): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7948fca2832,38855,1731756975024 2024-11-16T11:36:15,524 WARN [RS:0;a7948fca2832:38855 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T11:36:15,524 INFO [RS:0;a7948fca2832:38855 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:36:15,524 DEBUG [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024 2024-11-16T11:36:15,524 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7948fca2832,38855,1731756975024] 2024-11-16T11:36:15,527 INFO [RS:0;a7948fca2832:38855 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T11:36:15,529 INFO [RS:0;a7948fca2832:38855 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T11:36:15,529 INFO [RS:0;a7948fca2832:38855 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T11:36:15,529 INFO [RS:0;a7948fca2832:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-16T11:36:15,529 INFO [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T11:36:15,530 INFO [RS:0;a7948fca2832:38855 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T11:36:15,530 INFO [RS:0;a7948fca2832:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:15,531 DEBUG [RS:0;a7948fca2832:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:15,531 DEBUG [RS:0;a7948fca2832:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:15,531 DEBUG [RS:0;a7948fca2832:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:15,531 DEBUG [RS:0;a7948fca2832:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:15,531 DEBUG [RS:0;a7948fca2832:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:15,531 DEBUG [RS:0;a7948fca2832:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:36:15,531 DEBUG [RS:0;a7948fca2832:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:15,531 DEBUG [RS:0;a7948fca2832:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:15,531 DEBUG [RS:0;a7948fca2832:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:15,531 DEBUG [RS:0;a7948fca2832:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:15,531 DEBUG [RS:0;a7948fca2832:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:15,531 DEBUG [RS:0;a7948fca2832:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:15,531 DEBUG [RS:0;a7948fca2832:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:36:15,531 DEBUG [RS:0;a7948fca2832:38855 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:36:15,536 INFO [RS:0;a7948fca2832:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-16T11:36:15,536 INFO [RS:0;a7948fca2832:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-16T11:36:15,536 INFO [RS:0;a7948fca2832:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-16T11:36:15,536 INFO [RS:0;a7948fca2832:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-16T11:36:15,537 INFO [RS:0;a7948fca2832:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-16T11:36:15,537 INFO [RS:0;a7948fca2832:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,38855,1731756975024-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-16T11:36:15,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T11:36:15,554 INFO [RS:0;a7948fca2832:38855 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-16T11:36:15,554 INFO [RS:0;a7948fca2832:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,38855,1731756975024-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-16T11:36:15,554 INFO [RS:0;a7948fca2832:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-16T11:36:15,554 INFO [RS:0;a7948fca2832:38855 {}] regionserver.Replication(171): a7948fca2832,38855,1731756975024 started
2024-11-16T11:36:15,570 INFO [RS:0;a7948fca2832:38855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-16T11:36:15,570 INFO [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(1482): Serving as a7948fca2832,38855,1731756975024, RpcServer on a7948fca2832/172.17.0.2:38855, sessionid=0x101436e5ea50001
2024-11-16T11:36:15,571 DEBUG [RS:0;a7948fca2832:38855 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-16T11:36:15,571 DEBUG [RS:0;a7948fca2832:38855 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7948fca2832,38855,1731756975024
2024-11-16T11:36:15,571 DEBUG [RS:0;a7948fca2832:38855 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,38855,1731756975024'
2024-11-16T11:36:15,571 DEBUG [RS:0;a7948fca2832:38855 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-16T11:36:15,571 DEBUG [RS:0;a7948fca2832:38855 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-16T11:36:15,572 DEBUG [RS:0;a7948fca2832:38855 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-16T11:36:15,572 DEBUG [RS:0;a7948fca2832:38855 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-16T11:36:15,572 DEBUG [RS:0;a7948fca2832:38855 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a7948fca2832,38855,1731756975024
2024-11-16T11:36:15,572 DEBUG [RS:0;a7948fca2832:38855 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,38855,1731756975024'
2024-11-16T11:36:15,572 DEBUG [RS:0;a7948fca2832:38855 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-16T11:36:15,572 DEBUG [RS:0;a7948fca2832:38855 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-16T11:36:15,573 DEBUG [RS:0;a7948fca2832:38855 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-16T11:36:15,573 INFO [RS:0;a7948fca2832:38855 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-16T11:36:15,573 INFO [RS:0;a7948fca2832:38855 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-16T11:36:15,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T11:36:15,675 INFO [RS:0;a7948fca2832:38855 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C38855%2C1731756975024, suffix=, logDir=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024, archiveDir=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/oldWALs, maxLogs=32
2024-11-16T11:36:15,677 INFO [RS:0;a7948fca2832:38855 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C38855%2C1731756975024.1731756975677
2024-11-16T11:36:15,684 INFO [RS:0;a7948fca2832:38855 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677
2024-11-16T11:36:15,697 DEBUG [RS:0;a7948fca2832:38855 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41413:41413),(127.0.0.1/127.0.0.1:35765:35765)]
2024-11-16T11:36:15,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-16T11:36:15,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-16T11:36:15,809 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000;
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T11:36:15,809 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:15,810 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:15,810 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T11:36:15,811 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T11:36:15,811 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:15,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:15,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T11:36:15,813 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T11:36:15,813 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:15,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:15,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T11:36:15,814 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T11:36:15,814 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:15,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:15,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T11:36:15,816 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740 2024-11-16T11:36:15,816 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740 2024-11-16T11:36:15,817 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T11:36:15,817 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T11:36:15,818 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-16T11:36:15,819 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T11:36:15,821 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:36:15,821 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733032, jitterRate=-0.0679028332233429}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T11:36:15,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731756975806Initializing all the Stores at 1731756975807 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756975807Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756975807Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756975807Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756975807Cleaning up temporary data from old regions at 1731756975817 (+10 ms)Region opened successfully at 1731756975822 (+5 ms) 2024-11-16T11:36:15,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T11:36:15,822 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T11:36:15,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T11:36:15,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T11:36:15,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T11:36:15,822 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T11:36:15,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731756975822Disabling compacts and flushes for region at 1731756975822Disabling writes for close at 1731756975822Writing region close 
event to WAL at 1731756975822Closed at 1731756975822 2024-11-16T11:36:15,823 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:36:15,824 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T11:36:15,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T11:36:15,825 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T11:36:15,826 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T11:36:15,976 DEBUG [a7948fca2832:32837 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T11:36:15,977 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a7948fca2832,38855,1731756975024 2024-11-16T11:36:15,978 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7948fca2832,38855,1731756975024, state=OPENING 2024-11-16T11:36:16,060 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T11:36:16,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:16,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:16,071 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T11:36:16,071 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:36:16,071 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:36:16,071 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a7948fca2832,38855,1731756975024}] 2024-11-16T11:36:16,225 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T11:36:16,228 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37585, version=3.0.0-beta-2-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T11:36:16,233 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T11:36:16,233 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:36:16,236 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C38855%2C1731756975024.meta, suffix=.meta, logDir=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024, archiveDir=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/oldWALs, maxLogs=32 2024-11-16T11:36:16,237 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C38855%2C1731756975024.meta.1731756976237.meta 2024-11-16T11:36:16,242 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.meta.1731756976237.meta 2024-11-16T11:36:16,243 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35765:35765),(127.0.0.1/127.0.0.1:41413:41413)] 2024-11-16T11:36:16,244 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:36:16,244 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T11:36:16,244 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T11:36:16,244 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-16T11:36:16,244 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T11:36:16,244 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:36:16,244 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T11:36:16,244 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T11:36:16,246 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T11:36:16,246 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T11:36:16,247 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:16,247 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:16,247 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T11:36:16,248 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T11:36:16,248 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:16,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:16,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T11:36:16,249 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T11:36:16,249 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:16,250 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:16,250 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T11:36:16,250 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T11:36:16,250 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:16,251 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-16T11:36:16,251 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T11:36:16,252 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740 2024-11-16T11:36:16,253 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740 2024-11-16T11:36:16,255 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T11:36:16,255 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T11:36:16,256 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T11:36:16,258 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T11:36:16,259 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=825423, jitterRate=0.04958042502403259}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T11:36:16,259 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T11:36:16,260 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731756976245Writing region info on filesystem at 1731756976245Initializing all the Stores at 1731756976245Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756976245Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756976246 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756976246Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731756976246Cleaning up temporary data from old regions at 1731756976255 (+9 ms)Running coprocessor post-open hooks at 1731756976259 (+4 ms)Region opened successfully at 1731756976260 (+1 ms) 2024-11-16T11:36:16,261 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731756976225 2024-11-16T11:36:16,264 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T11:36:16,264 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T11:36:16,265 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a7948fca2832,38855,1731756975024 2024-11-16T11:36:16,266 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7948fca2832,38855,1731756975024, state=OPEN 2024-11-16T11:36:16,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T11:36:16,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T11:36:16,339 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:36:16,339 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:36:16,339 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a7948fca2832,38855,1731756975024 2024-11-16T11:36:16,343 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T11:36:16,344 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a7948fca2832,38855,1731756975024 in 268 msec 2024-11-16T11:36:16,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T11:36:16,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 519 msec 2024-11-16T11:36:16,348 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:36:16,348 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T11:36:16,350 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T11:36:16,350 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7948fca2832,38855,1731756975024, seqNum=-1] 2024-11-16T11:36:16,350 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T11:36:16,351 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41291, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T11:36:16,358 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 977 msec 2024-11-16T11:36:16,358 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731756976358, completionTime=-1 2024-11-16T11:36:16,358 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T11:36:16,358 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T11:36:16,360 INFO [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T11:36:16,360 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731757036360 2024-11-16T11:36:16,360 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731757096360 2024-11-16T11:36:16,360 INFO [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-16T11:36:16,361 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,32837,1731756974859-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:16,361 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,32837,1731756974859-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:16,361 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,32837,1731756974859-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:16,361 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a7948fca2832:32837, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T11:36:16,361 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-16T11:36:16,361 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-16T11:36:16,363 DEBUG [master/a7948fca2832:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-16T11:36:16,365 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.256sec
2024-11-16T11:36:16,365 INFO [master/a7948fca2832:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-16T11:36:16,365 INFO [master/a7948fca2832:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-16T11:36:16,365 INFO [master/a7948fca2832:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-16T11:36:16,365 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-16T11:36:16,365 INFO [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-16T11:36:16,365 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,32837,1731756974859-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-16T11:36:16,366 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,32837,1731756974859-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-16T11:36:16,367 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7928d23a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-16T11:36:16,367 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a7948fca2832,32837,-1 for getting cluster id
2024-11-16T11:36:16,367 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-16T11:36:16,368 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-16T11:36:16,368 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-16T11:36:16,368 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,32837,1731756974859-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
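Note: the ChoreService(168) lines above record background chores being scheduled on the active master. As a rough illustration only (the chore name, period, and main() harness below are invented for this note, and the exact ScheduledChore/ChoreService signatures are assumed from the public HBase API rather than taken from this test), a chore is typically a small ScheduledChore subclass whose chore() body runs at a fixed period once handed to a ChoreService:

// --- illustrative sketch, not part of the test log ---
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreExample {
  /** Hypothetical chore; name and 60000 ms period are made up, echoing the ClusterStatusChore entry above. */
  static final class HeartbeatChore extends ScheduledChore {
    HeartbeatChore(Stoppable stopper) {
      super("ExampleHeartbeatChore", stopper, 60_000);
    }

    @Override
    protected void chore() {
      // work done on every tick
      System.out.println("chore fired");
    }
  }

  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // scheduleChore() is what produces the "Chore ScheduledChore name=... is enabled." INFO lines above.
    ChoreService service = new ChoreService("example");
    service.scheduleChore(new HeartbeatChore(stopper));
    Thread.sleep(1_000);
    service.shutdown();
  }
}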
2024-11-16T11:36:16,369 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '81692352-ccc6-4367-8048-a10c7f0d370d'
2024-11-16T11:36:16,369 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-16T11:36:16,369 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "81692352-ccc6-4367-8048-a10c7f0d370d"
2024-11-16T11:36:16,370 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60da48cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-16T11:36:16,370 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a7948fca2832,32837,-1]
2024-11-16T11:36:16,370 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-16T11:36:16,370 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-16T11:36:16,372 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55138, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-16T11:36:16,373 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@586c1103, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-16T11:36:16,373 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-16T11:36:16,374 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7948fca2832,38855,1731756975024, seqNum=-1]
2024-11-16T11:36:16,375 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-16T11:36:16,376 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59914, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-16T11:36:16,378 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a7948fca2832,32837,1731756974859
2024-11-16T11:36:16,378 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-16T11:36:16,381 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-16T11:36:16,381 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart
2024-11-16T11:36:16,381 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2
2024-11-16T11:36:16,381 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-16T11:36:16,382 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is a7948fca2832,32837,1731756974859
2024-11-16T11:36:16,382 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5a2e7a96
2024-11-16T11:36:16,382 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-16T11:36:16,385 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55146, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-16T11:36:16,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32837 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-11-16T11:36:16,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32837 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
2024-11-16T11:36:16,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32837 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-16T11:36:16,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32837 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart
2024-11-16T11:36:16,389 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION
2024-11-16T11:36:16,389 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-16T11:36:16,389 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32837 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4
2024-11-16T11:36:16,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32837 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-16T11:36:16,391 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-16T11:36:16,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38263 is added to blk_1073741835_1011 (size=395)
2024-11-16T11:36:16,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741835_1011 (size=395)
2024-11-16T11:36:16,400 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 722ec77409feac066298392be4acfeae, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545
2024-11-16T11:36:16,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35527 is added to blk_1073741836_1012 (size=78)
2024-11-16T11:36:16,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38263 is added to blk_1073741836_1012 (size=78)
2024-11-16T11:36:16,407 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-16T11:36:16,407 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 722ec77409feac066298392be4acfeae, disabling compactions & flushes
2024-11-16T11:36:16,407 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.
2024-11-16T11:36:16,407 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.
2024-11-16T11:36:16,407 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae. after waiting 0 ms
2024-11-16T11:36:16,407 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.
2024-11-16T11:36:16,407 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.
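Note: the create 'TestLogRolling-testLogRollOnPipelineRestart' request and the resulting CreateTableProcedure (pid=4) above correspond to an ordinary Admin#createTable call against the mini-cluster. The sketch below is a minimal, hypothetical client-side equivalent (the connection setup and the class name are assumptions for this note, not code taken from the test):

// --- illustrative sketch, not part of the test log ---
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // One 'info' family with VERSIONS=1, BLOOMFILTER=ROW and a 64 KB block size,
      // mirroring the descriptor printed in the create request above.
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .build());
      // Drives the CreateTableProcedure (pid=4) and region assignment shown above.
      admin.createTable(table.build());
    }
  }
}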
2024-11-16T11:36:16,407 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 722ec77409feac066298392be4acfeae: Waiting for close lock at 1731756976407Disabling compacts and flushes for region at 1731756976407Disabling writes for close at 1731756976407Writing region close event to WAL at 1731756976407Closed at 1731756976407 2024-11-16T11:36:16,408 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T11:36:16,409 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731756976409"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731756976409"}]},"ts":"1731756976409"} 2024-11-16T11:36:16,411 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-16T11:36:16,413 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T11:36:16,413 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731756976413"}]},"ts":"1731756976413"} 2024-11-16T11:36:16,415 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-16T11:36:16,415 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=722ec77409feac066298392be4acfeae, ASSIGN}] 2024-11-16T11:36:16,417 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=722ec77409feac066298392be4acfeae, ASSIGN 2024-11-16T11:36:16,418 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=722ec77409feac066298392be4acfeae, ASSIGN; state=OFFLINE, location=a7948fca2832,38855,1731756975024; forceNewPlan=false, retain=false 2024-11-16T11:36:16,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:16,569 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=722ec77409feac066298392be4acfeae, regionState=OPENING, regionLocation=a7948fca2832,38855,1731756975024 2024-11-16T11:36:16,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=722ec77409feac066298392be4acfeae, ASSIGN because future has completed 2024-11-16T11:36:16,573 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 722ec77409feac066298392be4acfeae, server=a7948fca2832,38855,1731756975024}] 2024-11-16T11:36:16,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:16,640 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T11:36:16,659 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:16,659 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:16,659 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:16,660 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:16,660 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:16,660 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:16,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:16,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:16,665 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:16,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:16,731 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae. 2024-11-16T11:36:16,731 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 722ec77409feac066298392be4acfeae, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:36:16,731 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 722ec77409feac066298392be4acfeae 2024-11-16T11:36:16,731 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:36:16,731 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 722ec77409feac066298392be4acfeae 2024-11-16T11:36:16,731 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 722ec77409feac066298392be4acfeae 2024-11-16T11:36:16,733 INFO [StoreOpener-722ec77409feac066298392be4acfeae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 722ec77409feac066298392be4acfeae 2024-11-16T11:36:16,734 INFO [StoreOpener-722ec77409feac066298392be4acfeae-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 722ec77409feac066298392be4acfeae columnFamilyName info 2024-11-16T11:36:16,734 DEBUG [StoreOpener-722ec77409feac066298392be4acfeae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:16,735 
INFO [StoreOpener-722ec77409feac066298392be4acfeae-1 {}] regionserver.HStore(327): Store=722ec77409feac066298392be4acfeae/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:36:16,735 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 722ec77409feac066298392be4acfeae 2024-11-16T11:36:16,736 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/default/TestLogRolling-testLogRollOnPipelineRestart/722ec77409feac066298392be4acfeae 2024-11-16T11:36:16,736 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/default/TestLogRolling-testLogRollOnPipelineRestart/722ec77409feac066298392be4acfeae 2024-11-16T11:36:16,736 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 722ec77409feac066298392be4acfeae 2024-11-16T11:36:16,736 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 722ec77409feac066298392be4acfeae 2024-11-16T11:36:16,738 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 722ec77409feac066298392be4acfeae 2024-11-16T11:36:16,740 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/default/TestLogRolling-testLogRollOnPipelineRestart/722ec77409feac066298392be4acfeae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:36:16,740 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 722ec77409feac066298392be4acfeae; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734933, jitterRate=-0.06548541784286499}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T11:36:16,741 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 722ec77409feac066298392be4acfeae 2024-11-16T11:36:16,741 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 722ec77409feac066298392be4acfeae: Running coprocessor pre-open hook at 1731756976732Writing region info on filesystem at 1731756976732Initializing all the Stores at 1731756976732Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731756976733 (+1 ms)Cleaning up temporary data from old regions at 
1731756976736 (+3 ms)Running coprocessor post-open hooks at 1731756976741 (+5 ms)Region opened successfully at 1731756976741 2024-11-16T11:36:16,742 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae., pid=6, masterSystemTime=1731756976727 2024-11-16T11:36:16,744 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae. 2024-11-16T11:36:16,744 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae. 2024-11-16T11:36:16,745 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=722ec77409feac066298392be4acfeae, regionState=OPEN, openSeqNum=2, regionLocation=a7948fca2832,38855,1731756975024 2024-11-16T11:36:16,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 722ec77409feac066298392be4acfeae, server=a7948fca2832,38855,1731756975024 because future has completed 2024-11-16T11:36:16,751 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T11:36:16,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 722ec77409feac066298392be4acfeae, server=a7948fca2832,38855,1731756975024 in 176 msec 2024-11-16T11:36:16,755 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T11:36:16,755 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=722ec77409feac066298392be4acfeae, ASSIGN in 337 msec 2024-11-16T11:36:16,756 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T11:36:16,756 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731756976756"}]},"ts":"1731756976756"} 2024-11-16T11:36:16,759 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-16T11:36:16,760 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T11:36:16,763 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 374 msec 2024-11-16T11:36:17,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:17,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:18,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:36:18,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:19,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-16T11:36:19,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-16T11:36:20,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-16T11:36:21,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-16T11:36:21,137 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-16T11:36:21,138 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-11-16T11:36:21,138 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-11-16T11:36:21,139 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-16T11:36:21,139 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-16T11:36:21,139 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-16T11:36:21,139 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-16T11:36:21,747 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-16T11:36:21,773 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T11:36:21,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T11:36:21,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T11:36:21,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T11:36:21,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T11:36:21,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T11:36:21,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T11:36:21,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T11:36:21,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T11:36:21,783 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-16T11:36:21,788 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-16T11:36:21,788 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart'
2024-11-16T11:36:26,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32837 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-16T11:36:26,426 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-16T11:36:26,427 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart,, for max=2147483647 with caching=100
2024-11-16T11:36:26,431 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-16T11:36:26,431 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.
2024-11-16T11:36:26,436 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae., hostname=a7948fca2832,38855,1731756975024, seqNum=2]
2024-11-16T11:36:27,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:28,439 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677 2024-11-16T11:36:28,440 WARN [ResponseProcessor for block BP-2106001367-172.17.0.2-1731756972475:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2106001367-172.17.0.2-1731756972475:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-2106001367-172.17.0.2-1731756972475:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:35527,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:28,440 WARN [DataStreamer for file /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/WALs/a7948fca2832,32837,1731756974859/a7948fca2832%2C32837%2C1731756974859.1731756975175 block BP-2106001367-172.17.0.2-1731756972475:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2106001367-172.17.0.2-1731756972475:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK], DatanodeInfoWithStorage[127.0.0.1:35527,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35527,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK]) is bad. 2024-11-16T11:36:28,440 WARN [PacketResponder: BP-2106001367-172.17.0.2-1731756972475:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35527] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:28,441 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1969125605_22 at /127.0.0.1:37750 [Receiving block BP-2106001367-172.17.0.2-1731756972475:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38263:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37750 dst: /127.0.0.1:38263 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:28,441 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1969125605_22 at /127.0.0.1:44626 [Receiving block BP-2106001367-172.17.0.2-1731756972475:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35527:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44626 dst: /127.0.0.1:35527 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:28,443 WARN [ResponseProcessor for block BP-2106001367-172.17.0.2-1731756972475:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2106001367-172.17.0.2-1731756972475:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-2106001367-172.17.0.2-1731756972475:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:35527,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:28,443 WARN [ResponseProcessor for block BP-2106001367-172.17.0.2-1731756972475:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2106001367-172.17.0.2-1731756972475:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T11:36:28,443 WARN [DataStreamer for file /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.meta.1731756976237.meta block BP-2106001367-172.17.0.2-1731756972475:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2106001367-172.17.0.2-1731756972475:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK], DatanodeInfoWithStorage[127.0.0.1:35527,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35527,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK]) is bad. 2024-11-16T11:36:28,443 WARN [PacketResponder: BP-2106001367-172.17.0.2-1731756972475:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35527] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:28,443 WARN [DataStreamer for file /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677 block BP-2106001367-172.17.0.2-1731756972475:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2106001367-172.17.0.2-1731756972475:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35527,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK], DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35527,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK]) is bad. 
2024-11-16T11:36:28,443 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_65788428_22 at /127.0.0.1:44652 [Receiving block BP-2106001367-172.17.0.2-1731756972475:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35527:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44652 dst: /127.0.0.1:35527 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:28,444 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_65788428_22 at /127.0.0.1:37786 [Receiving block BP-2106001367-172.17.0.2-1731756972475:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38263:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37786 dst: /127.0.0.1:38263 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:28,444 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_65788428_22 at /127.0.0.1:37782 [Receiving block BP-2106001367-172.17.0.2-1731756972475:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38263:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37782 dst: /127.0.0.1:38263 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:28,444 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_65788428_22 at /127.0.0.1:44660 [Receiving block BP-2106001367-172.17.0.2-1731756972475:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35527:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44660 dst: /127.0.0.1:35527 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:28,512 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7dc1dfbb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:28,513 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@62961604{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:36:28,513 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:36:28,513 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26d3a1b0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:36:28,513 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19f9ad22{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir/,STOPPED} 2024-11-16T11:36:28,515 WARN [BP-2106001367-172.17.0.2-1731756972475 heartbeating to localhost/127.0.0.1:45425 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:36:28,515 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T11:36:28,515 WARN [BP-2106001367-172.17.0.2-1731756972475 heartbeating to localhost/127.0.0.1:45425 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2106001367-172.17.0.2-1731756972475 (Datanode Uuid 9b5b90c9-dcac-408d-8e33-6dedbaa4afd8) service to localhost/127.0.0.1:45425 2024-11-16T11:36:28,515 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:36:28,515 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data3/current/BP-2106001367-172.17.0.2-1731756972475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:28,516 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data4/current/BP-2106001367-172.17.0.2-1731756972475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:28,516 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:36:28,537 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:36:28,542 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:36:28,543 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:36:28,543 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:36:28,543 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T11:36:28,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d9ee158{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:36:28,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@714162c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:36:28,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:28,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:28,687 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4830599{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/java.io.tmpdir/jetty-localhost-39647-hadoop-hdfs-3_4_1-tests_jar-_-any-16609963015790705219/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:28,687 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2bc4f0c3{HTTP/1.1, (http/1.1)}{localhost:39647} 2024-11-16T11:36:28,687 INFO [Time-limited test {}] server.Server(415): Started @173909ms 2024-11-16T11:36:28,689 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:36:28,720 WARN [ResponseProcessor for block BP-2106001367-172.17.0.2-1731756972475:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2106001367-172.17.0.2-1731756972475:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:28,720 WARN [ResponseProcessor for block BP-2106001367-172.17.0.2-1731756972475:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2106001367-172.17.0.2-1731756972475:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:28,721 WARN [ResponseProcessor for block BP-2106001367-172.17.0.2-1731756972475:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2106001367-172.17.0.2-1731756972475:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T11:36:28,721 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_65788428_22 at /127.0.0.1:51710 [Receiving block BP-2106001367-172.17.0.2-1731756972475:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38263:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51710 dst: /127.0.0.1:38263 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:28,721 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_65788428_22 at /127.0.0.1:51712 [Receiving block BP-2106001367-172.17.0.2-1731756972475:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38263:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51712 dst: /127.0.0.1:38263 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:28,721 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1969125605_22 at /127.0.0.1:51694 [Receiving block BP-2106001367-172.17.0.2-1731756972475:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38263:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51694 dst: /127.0.0.1:38263 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:28,724 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1fa19949{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:28,725 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b3d84a8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:36:28,725 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:36:28,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b64032{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:36:28,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b64e74f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir/,STOPPED} 2024-11-16T11:36:28,726 WARN [BP-2106001367-172.17.0.2-1731756972475 heartbeating to localhost/127.0.0.1:45425 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:36:28,726 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T11:36:28,726 WARN [BP-2106001367-172.17.0.2-1731756972475 heartbeating to localhost/127.0.0.1:45425 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2106001367-172.17.0.2-1731756972475 (Datanode Uuid 287914b4-629f-429f-b417-8fb75fb89957) service to localhost/127.0.0.1:45425 2024-11-16T11:36:28,726 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:36:28,727 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data1/current/BP-2106001367-172.17.0.2-1731756972475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:28,727 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data2/current/BP-2106001367-172.17.0.2-1731756972475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:28,727 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:36:28,743 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:36:28,748 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:36:28,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:36:28,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:36:28,753 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T11:36:28,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@241661d7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:36:28,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@558f8fa7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:36:28,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@42e5eb45{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/java.io.tmpdir/jetty-localhost-38229-hadoop-hdfs-3_4_1-tests_jar-_-any-11001284575001670314/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:28,876 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@55d93ce5{HTTP/1.1, 
(http/1.1)}{localhost:38229} 2024-11-16T11:36:28,876 INFO [Time-limited test {}] server.Server(415): Started @174098ms 2024-11-16T11:36:28,877 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:36:29,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:29,588 WARN [Thread-1341 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:36:29,591 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9e09de375e5e774d with lease ID 0x1ed0d26b7431bf07: from storage DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388 node DatanodeRegistration(127.0.0.1:40327, datanodeUuid=9b5b90c9-dcac-408d-8e33-6dedbaa4afd8, infoPort=36889, infoSecurePort=0, ipcPort=45631, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:36:29,591 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9e09de375e5e774d with lease ID 0x1ed0d26b7431bf07: from storage DS-b2c6e4ba-8f08-495d-b866-63612b3cdca9 node DatanodeRegistration(127.0.0.1:40327, datanodeUuid=9b5b90c9-dcac-408d-8e33-6dedbaa4afd8, infoPort=36889, infoSecurePort=0, ipcPort=45631, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:36:29,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:29,666 WARN [Thread-1361 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:36:29,668 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1a7daf42fd876c53 with lease ID 0x1ed0d26b7431bf08: from storage DS-875e16d7-efc4-4fe3-9515-81888e8dff4c node DatanodeRegistration(127.0.0.1:35215, datanodeUuid=287914b4-629f-429f-b417-8fb75fb89957, infoPort=36969, infoSecurePort=0, ipcPort=45461, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:36:29,668 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1a7daf42fd876c53 with lease ID 0x1ed0d26b7431bf08: from storage DS-abba11ed-9fc2-4052-af92-266d45f46376 node DatanodeRegistration(127.0.0.1:35215, datanodeUuid=287914b4-629f-429f-b417-8fb75fb89957, infoPort=36969, infoSecurePort=0, ipcPort=45461, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T11:36:29,909 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-16T11:36:29,912 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-16T11:36:29,914 ERROR [FSHLog-0-hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545-prefix:a7948fca2832,38855,1731756975024 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:29,914 WARN [FSHLog-0-hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545-prefix:a7948fca2832,38855,1731756975024 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T11:36:29,914 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C38855%2C1731756975024:(num 1731756975677) roll requested 2024-11-16T11:36:29,915 INFO [regionserver/a7948fca2832:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C38855%2C1731756975024.1731756989915 2024-11-16T11:36:29,921 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677 newFile=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 2024-11-16T11:36:29,921 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:29,921 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:29,921 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:29,921 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:29,921 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:29,921 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 2024-11-16T11:36:29,924 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:29,924 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T11:36:29,925 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677 2024-11-16T11:36:29,925 WARN [IPC Server handler 4 on default port 45425 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-16T11:36:29,926 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677 after 1ms 2024-11-16T11:36:29,932 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36889:36889),(127.0.0.1/127.0.0.1:36969:36969)] 2024-11-16T11:36:29,933 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677 is not closed yet, will try archiving it next time 2024-11-16T11:36:30,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:30,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:30,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741833_1017 (size=1632) 2024-11-16T11:36:31,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:31,591 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-16T11:36:31,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:31,939 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-16T11:36:32,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:32,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:33,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:33,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:33,926 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677 after 4001ms 2024-11-16T11:36:33,944 WARN [ResponseProcessor for block BP-2106001367-172.17.0.2-1731756972475:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2106001367-172.17.0.2-1731756972475:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-2106001367-172.17.0.2-1731756972475:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:35215,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:33,944 WARN [DataStreamer for file /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 block BP-2106001367-172.17.0.2-1731756972475:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2106001367-172.17.0.2-1731756972475:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40327,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35215,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]) is bad. 
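Note on the repeated WARN util.RecoverLeaseFSUtils(258) "Failed invocation" entries above: the stack traces show HBase probing DistributedFileSystem.isFileClosed reflectively while recovering the WAL lease, so once the underlying DFS client has been shut down the resulting IOException ("Filesystem closed") surfaces wrapped in an InvocationTargetException, which is exactly what gets logged. The sketch below is a minimal illustration of that reflective probe, not the actual RecoverLeaseFSUtils code; the class name is made up for the example.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch of the reflective isFileClosed(...) probe implied by the
// RecoverLeaseFSUtils stack traces above. Illustrative only.
public class IsFileClosedProbe {

  /**
   * Returns true if the filesystem reports the file as closed, false if the
   * method is unavailable or the probe fails. An IOException thrown by the
   * filesystem (e.g. "Filesystem closed") reaches us wrapped in an
   * InvocationTargetException, matching the WARN entries in the log.
   */
  static boolean isFileClosed(FileSystem fs, Path p) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem implementation has no isFileClosed(Path)
    } catch (IllegalAccessException | InvocationTargetException e) {
      // e.getCause() would be the underlying IOException
      // ("Filesystem closed" in the entries above).
      return false;
    }
  }
}
```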
2024-11-16T11:36:33,944 WARN [PacketResponder: BP-2106001367-172.17.0.2-1731756972475:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35215] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:33,945 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_65788428_22 at /127.0.0.1:58924 [Receiving block BP-2106001367-172.17.0.2-1731756972475:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58924 dst: /127.0.0.1:40327 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T11:36:33,945 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_65788428_22 at /127.0.0.1:55750 [Receiving block BP-2106001367-172.17.0.2-1731756972475:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55750 dst: /127.0.0.1:35215 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
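The "Recovered lease, attempt=1 on file=... after 4001ms" entry a little above, and the "Failed to recover lease, attempt=0 ... after 1ms" entries later in this log, come from a recoverLease-then-poll loop on the old WAL file. Below is a stripped-down sketch of that pattern; the timeout and sleep values are illustrative, and the real RecoverLeaseFSUtils logic (reflection, staged pauses) is more involved.

```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Stripped-down sketch of the recoverLease-then-poll pattern behind the
// "Recover lease on dfs file ..." / "attempt=N" entries. Illustrative only.
public class LeaseRecoverySketch {

  static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      // Ask the NameNode to start (or re-check) lease recovery; returns true
      // once the file is closed and the lease fully released.
      if (dfs.recoverLease(wal)) {
        System.out.println("Recovered lease, attempt=" + attempt);
        return true;
      }
      // Between attempts, a cheaper isFileClosed(...) probe can detect the
      // close early -- this is the call that failed with "Filesystem closed"
      // in the WARN entries above once the client was already shut down.
      if (dfs.isFileClosed(wal)) {
        return true;
      }
      attempt++;
      Thread.sleep(1000L); // pause before the next attempt (value illustrative)
    }
    return false;
  }
}
```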
2024-11-16T11:36:34,001 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@42e5eb45{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:34,001 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@55d93ce5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:36:34,001 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:36:34,001 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@558f8fa7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:36:34,002 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@241661d7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir/,STOPPED} 2024-11-16T11:36:34,003 WARN [BP-2106001367-172.17.0.2-1731756972475 heartbeating to localhost/127.0.0.1:45425 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:36:34,003 WARN [BP-2106001367-172.17.0.2-1731756972475 heartbeating to localhost/127.0.0.1:45425 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2106001367-172.17.0.2-1731756972475 (Datanode Uuid 287914b4-629f-429f-b417-8fb75fb89957) service to localhost/127.0.0.1:45425 2024-11-16T11:36:34,004 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T11:36:34,004 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:36:34,004 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data1/current/BP-2106001367-172.17.0.2-1731756972475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:34,004 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:36:34,004 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data2/current/BP-2106001367-172.17.0.2-1731756972475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:34,012 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:36:34,015 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:36:34,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:36:34,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:36:34,016 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:36:34,017 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@546b6142{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:36:34,017 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1cdbb868{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:36:34,123 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4f2608b2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/java.io.tmpdir/jetty-localhost-36037-hadoop-hdfs-3_4_1-tests_jar-_-any-2932990358921733569/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:34,123 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1368c96a{HTTP/1.1, (http/1.1)}{localhost:36037} 2024-11-16T11:36:34,123 INFO [Time-limited test {}] server.Server(415): Started @179345ms 2024-11-16T11:36:34,124 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:36:34,157 WARN [ResponseProcessor for block BP-2106001367-172.17.0.2-1731756972475:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2106001367-172.17.0.2-1731756972475:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:34,158 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_65788428_22 at /127.0.0.1:40032 [Receiving block BP-2106001367-172.17.0.2-1731756972475:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40032 dst: /127.0.0.1:40327 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:36:34,167 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4830599{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:34,168 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2bc4f0c3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:36:34,168 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:36:34,168 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@714162c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:36:34,168 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d9ee158{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir/,STOPPED} 2024-11-16T11:36:34,169 WARN [BP-2106001367-172.17.0.2-1731756972475 heartbeating to localhost/127.0.0.1:45425 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:36:34,169 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
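The Jetty "Stopped/Started ... webapps/datanode" contexts and BPServiceActor shutdown entries in this stretch are a MiniDFSCluster datanode being bounced by the test (confirmed by the later "Data Nodes restarted" line and the block reports that follow). The sketch below only illustrates the standard MiniDFSCluster helpers such a test would typically use (it assumes the hadoop-hdfs test artifact on the classpath); it is not the TestLogRolling code itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical sketch of the kind of datanode bounce that produces the
// "Stopped ... webapps/datanode" / "Started ..." entries above. Illustrative
// only; not taken from the test.
public class DataNodeBounceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)   // two datanodes, as in the pipeline entries above
        .build();
    try {
      cluster.waitActive();
      // Restart datanode 0 with its existing storage directories; on coming
      // back it re-registers and sends a full block report, which is what the
      // "BLOCK* processReport ... blocks: N" entries further down reflect.
      cluster.restartDataNode(0);
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}
```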
2024-11-16T11:36:34,169 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:36:34,169 WARN [BP-2106001367-172.17.0.2-1731756972475 heartbeating to localhost/127.0.0.1:45425 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2106001367-172.17.0.2-1731756972475 (Datanode Uuid 9b5b90c9-dcac-408d-8e33-6dedbaa4afd8) service to localhost/127.0.0.1:45425 2024-11-16T11:36:34,170 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data3/current/BP-2106001367-172.17.0.2-1731756972475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:34,170 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data4/current/BP-2106001367-172.17.0.2-1731756972475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:34,170 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:36:34,184 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:36:34,187 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:36:34,188 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:36:34,188 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:36:34,188 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:36:34,188 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d1eba33{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:36:34,188 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64aa4bc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:36:34,291 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d60d3c4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/java.io.tmpdir/jetty-localhost-37651-hadoop-hdfs-3_4_1-tests_jar-_-any-1533492101381664364/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:34,291 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41b83c1e{HTTP/1.1, 
(http/1.1)}{localhost:37651} 2024-11-16T11:36:34,291 INFO [Time-limited test {}] server.Server(415): Started @179513ms 2024-11-16T11:36:34,293 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:36:34,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:34,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:34,752 WARN [Thread-1415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T11:36:34,762 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe8525326996ca2a3 with lease ID 0x1ed0d26b7431bf09: from storage DS-875e16d7-efc4-4fe3-9515-81888e8dff4c node DatanodeRegistration(127.0.0.1:38713, datanodeUuid=287914b4-629f-429f-b417-8fb75fb89957, infoPort=34033, infoSecurePort=0, ipcPort=34647, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:36:34,762 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe8525326996ca2a3 with lease ID 0x1ed0d26b7431bf09: from storage DS-abba11ed-9fc2-4052-af92-266d45f46376 node DatanodeRegistration(127.0.0.1:38713, datanodeUuid=287914b4-629f-429f-b417-8fb75fb89957, infoPort=34033, infoSecurePort=0, ipcPort=34647, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:36:34,930 WARN [Thread-1435 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:36:34,933 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb21fd12ce6327d42 with lease ID 0x1ed0d26b7431bf0a: from storage DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388 node DatanodeRegistration(127.0.0.1:43041, datanodeUuid=9b5b90c9-dcac-408d-8e33-6dedbaa4afd8, infoPort=41059, infoSecurePort=0, ipcPort=45399, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T11:36:34,933 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb21fd12ce6327d42 with lease ID 0x1ed0d26b7431bf0a: from storage DS-b2c6e4ba-8f08-495d-b866-63612b3cdca9 node DatanodeRegistration(127.0.0.1:43041, datanodeUuid=9b5b90c9-dcac-408d-8e33-6dedbaa4afd8, infoPort=41059, infoSecurePort=0, ipcPort=45399, storageInfo=lv=-57;cid=testClusterID;nsid=782509308;c=1731756972475), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:36:35,320 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-16T11:36:35,322 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-16T11:36:35,324 ERROR [FSHLog-0-hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545-prefix:a7948fca2832,38855,1731756975024 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40327,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:35,324 WARN [FSHLog-0-hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545-prefix:a7948fca2832,38855,1731756975024 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40327,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T11:36:35,324 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C38855%2C1731756975024:(num 1731756989915) roll requested 2024-11-16T11:36:35,325 INFO [regionserver/a7948fca2832:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C38855%2C1731756975024.1731756995324 2024-11-16T11:36:35,331 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 newFile=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756995324 2024-11-16T11:36:35,332 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:35,332 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:35,332 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:35,332 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:35,332 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:35,332 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756995324 2024-11-16T11:36:35,333 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40327,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:35,333 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40327,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
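The roll sequence just above ("roll requested" → "Rolled WAL ... with entries=2, filesize=2.37 KB" → "Failed to write trailer, non-fatal" → "close old writer failed") was triggered internally by the log roller after the append failure. For reference, a client can also request a roll explicitly; the hedged sketch below uses the Admin API for that, with the server name copied from the WAL directory purely for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch: requesting a WAL roll on one regionserver from client code.
// The rolls in this log were driven by the log roller itself, not this API.
public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Server name format is host,port,startcode; taken from the WAL path
      // above only as an example.
      ServerName rs = ServerName.valueOf("a7948fca2832,38855,1731756975024");
      // Ask the regionserver to close its current WAL and open a new one.
      admin.rollWALWriter(rs);
    }
  }
}
```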
2024-11-16T11:36:35,333 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 2024-11-16T11:36:35,333 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34033:34033),(127.0.0.1/127.0.0.1:41059:41059)] 2024-11-16T11:36:35,333 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 is not closed yet, will try archiving it next time 2024-11-16T11:36:35,333 WARN [IPC Server handler 1 on default port 45425 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-16T11:36:35,334 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 after 1ms 2024-11-16T11:36:35,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:35,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:36,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:36,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:37,335 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C38855%2C1731756975024.1731756997335 2024-11-16T11:36:37,341 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756995324 newFile=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 2024-11-16T11:36:37,341 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:37,341 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:37,341 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:37,341 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:37,342 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:37,342 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756995324 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 2024-11-16T11:36:37,343 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41059:41059),(127.0.0.1/127.0.0.1:34033:34033)] 2024-11-16T11:36:37,343 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 is not closed yet, will try archiving it next time 2024-11-16T11:36:37,343 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756995324 is not closed yet, will try archiving it next time 2024-11-16T11:36:37,344 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677 2024-11-16T11:36:37,344 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677 2024-11-16T11:36:37,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43041 is added to blk_1073741838_1019 (size=1264) 2024-11-16T11:36:37,345 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): 
Recovered lease, attempt=0 on file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677 after 1ms 2024-11-16T11:36:37,345 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677 2024-11-16T11:36:37,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38713 is added to blk_1073741838_1019 (size=1264) 2024-11-16T11:36:37,345 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 is not closed yet, will try archiving it next time 2024-11-16T11:36:37,357 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731756976741/Put/vlen=218/seqid=0] 2024-11-16T11:36:37,358 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731756986437/Put/vlen=1045/seqid=0] 2024-11-16T11:36:37,358 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756975677 2024-11-16T11:36:37,358 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 2024-11-16T11:36:37,358 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 2024-11-16T11:36:37,359 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 after 0ms 2024-11-16T11:36:37,359 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 2024-11-16T11:36:37,362 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731756989914/Put/vlen=1045/seqid=0] 2024-11-16T11:36:37,363 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731756991941/Put/vlen=1045/seqid=0] 2024-11-16T11:36:37,363 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 2024-11-16T11:36:37,363 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756995324 2024-11-16T11:36:37,363 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756995324 2024-11-16T11:36:37,363 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756995324 after 0ms 2024-11-16T11:36:37,363 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756995324 2024-11-16T11:36:37,367 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731756995324/Put/vlen=1045/seqid=0] 2024-11-16T11:36:37,367 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 2024-11-16T11:36:37,367 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 2024-11-16T11:36:37,368 WARN [IPC Server handler 3 on default port 45425 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-16T11:36:37,368 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 after 1ms 2024-11-16T11:36:37,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:37,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:36:37,935 WARN [ResponseProcessor for block BP-2106001367-172.17.0.2-1731756972475:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2106001367-172.17.0.2-1731756972475:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:37,934 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1969125605_22 at /127.0.0.1:38742 [Receiving block BP-2106001367-172.17.0.2-1731756972475:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:43041:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38742 dst: /127.0.0.1:43041 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:43041 remote=/127.0.0.1:38742]. Total timeout mills is 60000, 59406 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
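The "Recover lease" / "Failed to recover lease, attempt=0 ... Lease recovery is in progress" entries above, together with the repeated "Filesystem closed" failures out of RecoverLeaseFSUtils.isFileClosed, follow the standard HDFS lease-recovery retry pattern: ask the NameNode to recover the lease, then poll until the file is reported closed. A minimal sketch of that pattern, assuming only the public DistributedFileSystem API (the class name, timeouts, and structure below are hypothetical, not RecoverLeaseFSUtils itself):

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoveryExample {
  /** Ask the NameNode to recover the lease, then poll until the file is closed. */
  public static boolean recoverLease(DistributedFileSystem dfs, Path file,
      long timeoutMs, long pauseMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease() returns true once the NameNode has closed the file
      // ("Recovered lease, attempt=N" in the log above).
      if (dfs.recoverLease(file)) {
        return true;
      }
      // Otherwise recovery is still in progress ("Lease recovery is in progress"
      // on the NameNode side); isFileClosed() is a cheaper re-check. Note that it
      // throws IOException("Filesystem closed") if the DFSClient was already shut
      // down, which is exactly the cause seen in the traces above.
      if (dfs.isFileClosed(file)) {
        return true;
      }
      Thread.sleep(pauseMs); // back off before the next attempt
    }
    return false; // "Failed to recover lease, attempt=N"
  }
}
```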
2024-11-16T11:36:37,935 WARN [DataStreamer for file /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 block BP-2106001367-172.17.0.2-1731756972475:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2106001367-172.17.0.2-1731756972475:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43041,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK], DatanodeInfoWithStorage[127.0.0.1:38713,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43041,DS-4d3a02cc-a19f-42e3-9acc-fb9549a71388,DISK]) is bad. 2024-11-16T11:36:37,935 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1969125605_22 at /127.0.0.1:47550 [Receiving block BP-2106001367-172.17.0.2-1731756972475:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:38713:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47550 dst: /127.0.0.1:38713 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
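The "datanode 0(...) is bad" and "Premature EOF from inputStream" entries above show the DFS output stream dropping a failed datanode from the write pipeline before attempting recovery. How aggressively a client then replaces the bad datanode, or keeps writing with the survivors, is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings. A hedged sketch of tuning them is below; the values are examples only, not what this test run used:

```java
import org.apache.hadoop.conf.Configuration;

public final class PipelineRecoverySettings {
  /** Example client configuration for tolerating pipeline datanode failures. */
  public static Configuration pipelineFailureTolerantConf() {
    Configuration conf = new Configuration();
    // Allow the client to ask for a replacement datanode when one in the
    // pipeline is marked bad.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // If no replacement can be found (e.g. a 2-node mini-cluster), keep writing
    // with the surviving datanodes instead of failing the stream outright.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
```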
2024-11-16T11:36:37,936 WARN [DataStreamer for file /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 block BP-2106001367-172.17.0.2-1731756972475:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2106001367-172.17.0.2-1731756972475:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
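The "Unexpected BlockUCState: ... is UNDER_RECOVERY but not UNDER_CONSTRUCTION" failure above arises because the old WAL block is already being lease-recovered by the NameNode when the writer's DataStreamer tries to bump its generation stamp, so client-side pipeline recovery cannot proceed. The AbstractFSWAL.closeWriter/recoverLease frames in these traces suggest the usual fallback: close the old writer best-effort and, if that fails, rely on NameNode-driven lease recovery. A hypothetical sketch of that fallback, reusing the recoverLease helper sketched earlier (neither class is HBase code):

```java
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class CloseOldWriterExample {
  /** Best-effort close of a rolled-away WAL writer, falling back to lease recovery. */
  public static void closeOldWriter(DistributedFileSystem dfs, FSDataOutputStream out,
      Path oldWalFile) throws IOException, InterruptedException {
    try {
      out.close(); // fails if the write pipeline is already broken
    } catch (IOException e) {
      // Non-fatal: the WAL has already been rolled to a new file. Fall back to
      // NameNode-driven lease recovery so the old file can still be read later.
      LeaseRecoveryExample.recoverLease(dfs, oldWalFile, 900_000L, 4_000L);
    }
  }
}
```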
2024-11-16T11:36:37,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43041 is added to blk_1073741839_1022 (size=85) 2024-11-16T11:36:37,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38713 is added to blk_1073741839_1022 (size=85) 2024-11-16T11:36:38,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:38,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:39,335 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756989915 after 4002ms 2024-11-16T11:36:39,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:39,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:40,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:40,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:40,762 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T11:36:41,369 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 after 4002ms 2024-11-16T11:36:41,369 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 2024-11-16T11:36:41,373 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 2024-11-16T11:36:41,373 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-16T11:36:41,374 ERROR [FSHLog-0-hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545-prefix:a7948fca2832,38855,1731756975024.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:41,374 WARN [FSHLog-0-hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545-prefix:a7948fca2832,38855,1731756975024.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:41,374 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C38855%2C1731756975024.meta:.meta(num 1731756976237) roll requested 2024-11-16T11:36:41,375 INFO [regionserver/a7948fca2832:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C38855%2C1731756975024.meta.1731757001374.meta 2024-11-16T11:36:41,381 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:41,381 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:41,381 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:41,382 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:41,382 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:41,382 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.meta.1731756976237.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.meta.1731757001374.meta 2024-11-16T11:36:41,382 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:41,383 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:41,383 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.meta.1731756976237.meta 2024-11-16T11:36:41,383 WARN [IPC Server handler 1 on default port 45425 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.meta.1731756976237.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-16T11:36:41,383 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41059:41059),(127.0.0.1/127.0.0.1:34033:34033)] 2024-11-16T11:36:41,383 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.meta.1731756976237.meta is not closed yet, will try archiving it next time 2024-11-16T11:36:41,384 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.meta.1731756976237.meta after 1ms 2024-11-16T11:36:41,401 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/.tmp/info/7bc4081b94844c528789063bb2de5ff3 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae./info:regioninfo/1731756976745/Put/seqid=0 2024-11-16T11:36:41,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38713 is added to blk_1073741841_1025 (size=7125) 2024-11-16T11:36:41,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43041 is added to blk_1073741841_1025 (size=7125) 2024-11-16T11:36:41,406 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/.tmp/info/7bc4081b94844c528789063bb2de5ff3 2024-11-16T11:36:41,427 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/.tmp/ns/2a8097b996c74db0aefb9ac33df2960a is 43, key is default/ns:d/1731756976352/Put/seqid=0 2024-11-16T11:36:41,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38713 is added to blk_1073741842_1026 (size=5153) 2024-11-16T11:36:41,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43041 is added to blk_1073741842_1026 (size=5153) 2024-11-16T11:36:41,433 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/.tmp/ns/2a8097b996c74db0aefb9ac33df2960a 2024-11-16T11:36:41,454 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/.tmp/table/3da8abb4f8e046baade6fc617511646b is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731756976756/Put/seqid=0 2024-11-16T11:36:41,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38713 is added to blk_1073741843_1027 (size=5438) 2024-11-16T11:36:41,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43041 is added to blk_1073741843_1027 (size=5438) 2024-11-16T11:36:41,462 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/.tmp/table/3da8abb4f8e046baade6fc617511646b 2024-11-16T11:36:41,468 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/.tmp/info/7bc4081b94844c528789063bb2de5ff3 as hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/info/7bc4081b94844c528789063bb2de5ff3 2024-11-16T11:36:41,474 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/info/7bc4081b94844c528789063bb2de5ff3, entries=10, sequenceid=11, filesize=7.0 K 2024-11-16T11:36:41,475 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/.tmp/ns/2a8097b996c74db0aefb9ac33df2960a as hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/ns/2a8097b996c74db0aefb9ac33df2960a 2024-11-16T11:36:41,481 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/ns/2a8097b996c74db0aefb9ac33df2960a, entries=2, sequenceid=11, filesize=5.0 K 
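The flush entries above show the commit step of a memstore flush: each HFile is first written under the region's .tmp directory (HFileWriterImpl / DefaultStoreFlusher) and only afterwards moved into the column-family directory (HRegionFileSystem "Committing ... as ..."), so readers never observe a half-written store file. A minimal sketch of that write-to-temp-then-rename commit, using only the generic FileSystem API rather than the HBase implementation (class and method names below are illustrative):

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TmpCommitExample {
  /** Move a fully written temp file into its final store directory atomically. */
  public static Path commit(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
    Path dst = new Path(familyDir, tmpFile.getName());
    // The flusher writes the whole file under .tmp first; the rename is the
    // single visible step, so a partially written file never appears in familyDir.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }
}
```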
2024-11-16T11:36:41,482 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/.tmp/table/3da8abb4f8e046baade6fc617511646b as hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/table/3da8abb4f8e046baade6fc617511646b 2024-11-16T11:36:41,489 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/table/3da8abb4f8e046baade6fc617511646b, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T11:36:41,491 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 117ms, sequenceid=11, compaction requested=false 2024-11-16T11:36:41,491 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T11:36:41,491 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 722ec77409feac066298392be4acfeae 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-16T11:36:41,491 ERROR [FSHLog-0-hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545-prefix:a7948fca2832,38855,1731756975024 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2106001367-172.17.0.2-1731756972475:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:41,492 WARN [FSHLog-0-hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545-prefix:a7948fca2832,38855,1731756975024 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2106001367-172.17.0.2-1731756972475:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T11:36:41,492 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C38855%2C1731756975024:(num 1731756997335) roll requested
2024-11-16T11:36:41,492 INFO [regionserver/a7948fca2832:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C38855%2C1731756975024.1731757001492
2024-11-16T11:36:41,501 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 newFile=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731757001492
2024-11-16T11:36:41,501 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:41,502 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:41,502 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:41,502 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:41,502 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:41,502 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731757001492
2024-11-16T11:36:41,502 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2106001367-172.17.0.2-1731756972475:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T11:36:41,502 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2106001367-172.17.0.2-1731756972475:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T11:36:41,503 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335
2024-11-16T11:36:41,503 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34033:34033),(127.0.0.1/127.0.0.1:41059:41059)]
2024-11-16T11:36:41,503 DEBUG [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 is not closed yet, will try archiving it next time
2024-11-16T11:36:41,503 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 after 0ms
2024-11-16T11:36:41,504 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.1731756997335 to hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/oldWALs/a7948fca2832%2C38855%2C1731756975024.1731756997335
2024-11-16T11:36:41,520 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/default/TestLogRolling-testLogRollOnPipelineRestart/722ec77409feac066298392be4acfeae/.tmp/info/03a16af7d0b94cfc8b4c191a8cb46d2c is 1080, key is row1002/info:/1731756986437/Put/seqid=0
2024-11-16T11:36:41,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43041 is added to blk_1073741845_1029 (size=9270)
2024-11-16T11:36:41,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38713 is added to blk_1073741845_1029 (size=9270)
2024-11-16T11:36:41,525 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/default/TestLogRolling-testLogRollOnPipelineRestart/722ec77409feac066298392be4acfeae/.tmp/info/03a16af7d0b94cfc8b4c191a8cb46d2c
2024-11-16T11:36:41,532 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/default/TestLogRolling-testLogRollOnPipelineRestart/722ec77409feac066298392be4acfeae/.tmp/info/03a16af7d0b94cfc8b4c191a8cb46d2c as hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/default/TestLogRolling-testLogRollOnPipelineRestart/722ec77409feac066298392be4acfeae/info/03a16af7d0b94cfc8b4c191a8cb46d2c
2024-11-16T11:36:41,539 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/default/TestLogRolling-testLogRollOnPipelineRestart/722ec77409feac066298392be4acfeae/info/03a16af7d0b94cfc8b4c191a8cb46d2c, entries=4, sequenceid=8, filesize=9.1 K
2024-11-16T11:36:41,540 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 722ec77409feac066298392be4acfeae in 49ms, sequenceid=8, compaction requested=false
2024-11-16T11:36:41,540 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 722ec77409feac066298392be4acfeae:
2024-11-16T11:36:41,546 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-16T11:36:41,546 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-16T11:36:41,546 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-16T11:36:41,546 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-16T11:36:41,546 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-16T11:36:41,547 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-16T11:36:41,547 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-16T11:36:41,547 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1126613160, stopped=false
2024-11-16T11:36:41,547 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a7948fca2832,32837,1731756974859
2024-11-16T11:36:41,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T11:36:41,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T11:36:41,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-16T11:36:41,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-16T11:36:41,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-16T11:36:41,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-16T11:36:41,606 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-16T11:36:41,607 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-16T11:36:41,607 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-16T11:36:41,607 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-16T11:36:41,607 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-16T11:36:41,607 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-16T11:36:41,607 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a7948fca2832,38855,1731756975024' *****
2024-11-16T11:36:41,607 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-16T11:36:41,607 INFO [RS:0;a7948fca2832:38855 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-16T11:36:41,607 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-16T11:36:41,608 INFO [RS:0;a7948fca2832:38855 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-16T11:36:41,608 INFO [RS:0;a7948fca2832:38855 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-16T11:36:41,608 INFO [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(3091): Received CLOSE for 722ec77409feac066298392be4acfeae
2024-11-16T11:36:41,608 INFO [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(959): stopping server a7948fca2832,38855,1731756975024
2024-11-16T11:36:41,608 INFO [RS:0;a7948fca2832:38855 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-16T11:36:41,608 INFO [RS:0;a7948fca2832:38855 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a7948fca2832:38855.
2024-11-16T11:36:41,608 DEBUG [RS:0;a7948fca2832:38855 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-16T11:36:41,608 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 722ec77409feac066298392be4acfeae, disabling compactions & flushes
2024-11-16T11:36:41,608 DEBUG [RS:0;a7948fca2832:38855 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-16T11:36:41,608 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.
2024-11-16T11:36:41,608 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.
2024-11-16T11:36:41,608 INFO [RS:0;a7948fca2832:38855 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-16T11:36:41,608 INFO [RS:0;a7948fca2832:38855 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-16T11:36:41,608 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae. after waiting 0 ms
2024-11-16T11:36:41,608 INFO [RS:0;a7948fca2832:38855 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-16T11:36:41,608 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.
2024-11-16T11:36:41,608 INFO [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-16T11:36:41,608 INFO [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-16T11:36:41,608 DEBUG [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 722ec77409feac066298392be4acfeae=TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.}
2024-11-16T11:36:41,608 DEBUG [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 722ec77409feac066298392be4acfeae
2024-11-16T11:36:41,608 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-16T11:36:41,608 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-16T11:36:41,609 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-16T11:36:41,609 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-16T11:36:41,609 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-16T11:36:41,612 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/default/TestLogRolling-testLogRollOnPipelineRestart/722ec77409feac066298392be4acfeae/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1
2024-11-16T11:36:41,613 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.
2024-11-16T11:36:41,613 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 722ec77409feac066298392be4acfeae: Waiting for close lock at 1731757001608Running coprocessor pre-close hooks at 1731757001608Disabling compacts and flushes for region at 1731757001608Disabling writes for close at 1731757001608Writing region close event to WAL at 1731757001609 (+1 ms)Running coprocessor post-close hooks at 1731757001612 (+3 ms)Closed at 1731757001613 (+1 ms)
2024-11-16T11:36:41,613 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731756976385.722ec77409feac066298392be4acfeae.
2024-11-16T11:36:41,613 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-11-16T11:36:41,614 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-16T11:36:41,614 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-16T11:36:41,614 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731757001608Running coprocessor pre-close hooks at 1731757001608Disabling compacts and flushes for region at 1731757001608Disabling writes for close at 1731757001609 (+1 ms)Writing region close event to WAL at 1731757001610 (+1 ms)Running coprocessor post-close hooks at 1731757001614 (+4 ms)Closed at 1731757001614
2024-11-16T11:36:41,614 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-16T11:36:41,809 INFO [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(976): stopping server a7948fca2832,38855,1731756975024; all regions closed.
2024-11-16T11:36:41,809 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:41,809 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:41,810 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:41,810 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:41,810 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:41,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38713 is added to blk_1073741840_1023 (size=825)
2024-11-16T11:36:41,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43041 is added to blk_1073741840_1023 (size=825)
2024-11-16T11:36:42,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T11:36:42,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T11:36:42,611 INFO [regionserver/a7948fca2832:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-11-16T11:36:42,611 INFO [regionserver/a7948fca2832:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-11-16T11:36:43,538 INFO [regionserver/a7948fca2832:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-16T11:36:43,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T11:36:43,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T11:36:43,933 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-11-16T11:36:44,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T11:36:44,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T11:36:44,839 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-16T11:36:45,384 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.meta.1731756976237.meta after 4001ms
2024-11-16T11:36:45,385 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/WALs/a7948fca2832,38855,1731756975024/a7948fca2832%2C38855%2C1731756975024.meta.1731756976237.meta to hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/oldWALs/a7948fca2832%2C38855%2C1731756975024.meta.1731756976237.meta
2024-11-16T11:36:45,387 DEBUG [RS:0;a7948fca2832:38855 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/oldWALs
2024-11-16T11:36:45,387 INFO [RS:0;a7948fca2832:38855 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C38855%2C1731756975024.meta:.meta(num 1731757001374)
2024-11-16T11:36:45,388 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:45,388 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:45,388 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:45,388 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:45,388 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:36:45,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43041 is added to blk_1073741844_1028 (size=1162)
2024-11-16T11:36:45,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38713 is added to blk_1073741844_1028 (size=1162)
2024-11-16T11:36:45,394 DEBUG [RS:0;a7948fca2832:38855 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/oldWALs
2024-11-16T11:36:45,395 INFO [RS:0;a7948fca2832:38855 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C38855%2C1731756975024:(num 1731757001492)
2024-11-16T11:36:45,395 DEBUG [RS:0;a7948fca2832:38855 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-16T11:36:45,395 INFO [RS:0;a7948fca2832:38855 {}] regionserver.LeaseManager(133): Closed leases
2024-11-16T11:36:45,395 INFO [RS:0;a7948fca2832:38855 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-16T11:36:45,395 INFO [RS:0;a7948fca2832:38855 {}] hbase.ChoreService(370): Chore service for: regionserver/a7948fca2832:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-16T11:36:45,395 INFO [RS:0;a7948fca2832:38855 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-16T11:36:45,395 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-16T11:36:45,395 INFO [RS:0;a7948fca2832:38855 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38855
2024-11-16T11:36:45,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-16T11:36:45,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7948fca2832,38855,1731756975024
2024-11-16T11:36:45,448 INFO [RS:0;a7948fca2832:38855 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-16T11:36:45,459 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7948fca2832,38855,1731756975024]
2024-11-16T11:36:45,469 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7948fca2832,38855,1731756975024 already deleted, retry=false
2024-11-16T11:36:45,469 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7948fca2832,38855,1731756975024 expired; onlineServers=0
2024-11-16T11:36:45,469 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a7948fca2832,32837,1731756974859' *****
2024-11-16T11:36:45,469 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-16T11:36:45,470 INFO [M:0;a7948fca2832:32837 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-16T11:36:45,470 INFO [M:0;a7948fca2832:32837 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-16T11:36:45,470 DEBUG [M:0;a7948fca2832:32837 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-16T11:36:45,470 DEBUG [M:0;a7948fca2832:32837 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-16T11:36:45,470 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-16T11:36:45,470 DEBUG [master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731756975385 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731756975385,5,FailOnTimeoutGroup]
2024-11-16T11:36:45,470 DEBUG [master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731756975385 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731756975385,5,FailOnTimeoutGroup]
2024-11-16T11:36:45,470 INFO [M:0;a7948fca2832:32837 {}] hbase.ChoreService(370): Chore service for: master/a7948fca2832:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-16T11:36:45,470 INFO [M:0;a7948fca2832:32837 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-16T11:36:45,471 DEBUG [M:0;a7948fca2832:32837 {}] master.HMaster(1795): Stopping service threads
2024-11-16T11:36:45,471 INFO [M:0;a7948fca2832:32837 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-16T11:36:45,471 INFO [M:0;a7948fca2832:32837 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-16T11:36:45,471 INFO [M:0;a7948fca2832:32837 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-16T11:36:45,471 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-16T11:36:45,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-16T11:36:45,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-16T11:36:45,480 DEBUG [M:0;a7948fca2832:32837 {}] zookeeper.ZKUtil(347): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-16T11:36:45,480 WARN [M:0;a7948fca2832:32837 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-16T11:36:45,481 INFO [M:0;a7948fca2832:32837 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/.lastflushedseqids
2024-11-16T11:36:45,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38713 is added to blk_1073741846_1030 (size=111)
2024-11-16T11:36:45,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43041 is added to blk_1073741846_1030 (size=111)
2024-11-16T11:36:45,487 INFO [M:0;a7948fca2832:32837 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-16T11:36:45,487 INFO [M:0;a7948fca2832:32837 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-16T11:36:45,487 DEBUG [M:0;a7948fca2832:32837 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-16T11:36:45,487 INFO [M:0;a7948fca2832:32837 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-16T11:36:45,487 DEBUG [M:0;a7948fca2832:32837 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-16T11:36:45,487 DEBUG [M:0;a7948fca2832:32837 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-16T11:36:45,487 DEBUG [M:0;a7948fca2832:32837 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-16T11:36:45,487 INFO [M:0;a7948fca2832:32837 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB
2024-11-16T11:36:45,487 ERROR [FSHLog-0-hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData-prefix:a7948fca2832,32837,1731756974859 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T11:36:45,488 WARN [FSHLog-0-hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData-prefix:a7948fca2832,32837,1731756974859 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T11:36:45,488 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog a7948fca2832%2C32837%2C1731756974859:(num 1731756975175) roll requested 2024-11-16T11:36:45,488 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C32837%2C1731756974859.1731757005488 2024-11-16T11:36:45,493 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:45,493 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:45,493 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:45,493 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:45,493 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:45,493 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/WALs/a7948fca2832,32837,1731756974859/a7948fca2832%2C32837%2C1731756974859.1731756975175 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/WALs/a7948fca2832,32837,1731756974859/a7948fca2832%2C32837%2C1731756974859.1731757005488 2024-11-16T11:36:45,494 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T11:36:45,494 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38263,DS-875e16d7-efc4-4fe3-9515-81888e8dff4c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T11:36:45,494 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/WALs/a7948fca2832,32837,1731756974859/a7948fca2832%2C32837%2C1731756974859.1731756975175 2024-11-16T11:36:45,494 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34033:34033),(127.0.0.1/127.0.0.1:41059:41059)] 2024-11-16T11:36:45,494 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/WALs/a7948fca2832,32837,1731756974859/a7948fca2832%2C32837%2C1731756974859.1731756975175 is not closed yet, will try archiving it next time 2024-11-16T11:36:45,494 WARN [IPC Server handler 1 on default port 45425 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/WALs/a7948fca2832,32837,1731756974859/a7948fca2832%2C32837%2C1731756974859.1731756975175 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-16T11:36:45,494 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/WALs/a7948fca2832,32837,1731756974859/a7948fca2832%2C32837%2C1731756974859.1731756975175 after 0ms 2024-11-16T11:36:45,516 DEBUG [M:0;a7948fca2832:32837 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f50134e16d59452289db4cf906b27791 is 82, key is hbase:meta,,1/info:regioninfo/1731756976265/Put/seqid=0 2024-11-16T11:36:45,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38713 is added to blk_1073741848_1033 (size=5672) 2024-11-16T11:36:45,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43041 is added to blk_1073741848_1033 (size=5672) 2024-11-16T11:36:45,522 INFO [M:0;a7948fca2832:32837 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f50134e16d59452289db4cf906b27791 2024-11-16T11:36:45,545 DEBUG [M:0;a7948fca2832:32837 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/188f2f87b3224711928a5ddf2eddbadf is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731756976762/Put/seqid=0 2024-11-16T11:36:45,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38713 is added to blk_1073741849_1034 (size=6118) 2024-11-16T11:36:45,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43041 is added to blk_1073741849_1034 (size=6118) 2024-11-16T11:36:45,551 INFO [M:0;a7948fca2832:32837 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/188f2f87b3224711928a5ddf2eddbadf 2024-11-16T11:36:45,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:36:45,559 INFO [RS:0;a7948fca2832:38855 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T11:36:45,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38855-0x101436e5ea50001, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:36:45,559 INFO [RS:0;a7948fca2832:38855 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7948fca2832,38855,1731756975024; zookeeper connection closed. 2024-11-16T11:36:45,559 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7e49d7be {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7e49d7be 2024-11-16T11:36:45,559 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T11:36:45,572 DEBUG [M:0;a7948fca2832:32837 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d0db064f8f9a46cc948e703ea3b0dca0 is 69, key is a7948fca2832,38855,1731756975024/rs:state/1731756975514/Put/seqid=0 2024-11-16T11:36:45,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43041 is added to blk_1073741850_1035 (size=5156) 2024-11-16T11:36:45,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38713 is added to blk_1073741850_1035 (size=5156) 2024-11-16T11:36:45,578 INFO [M:0;a7948fca2832:32837 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d0db064f8f9a46cc948e703ea3b0dca0 2024-11-16T11:36:45,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:45,597 DEBUG [M:0;a7948fca2832:32837 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0bc57a59470c40e1963a91404ead4490 is 52, key is load_balancer_on/state:d/1731756976380/Put/seqid=0 2024-11-16T11:36:45,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43041 is added to blk_1073741851_1036 (size=5056) 2024-11-16T11:36:45,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38713 is added to blk_1073741851_1036 (size=5056) 2024-11-16T11:36:45,602 INFO [M:0;a7948fca2832:32837 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0bc57a59470c40e1963a91404ead4490 2024-11-16T11:36:45,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:45,608 DEBUG [M:0;a7948fca2832:32837 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f50134e16d59452289db4cf906b27791 as hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f50134e16d59452289db4cf906b27791 2024-11-16T11:36:45,613 INFO [M:0;a7948fca2832:32837 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f50134e16d59452289db4cf906b27791, entries=8, sequenceid=56, filesize=5.5 K 2024-11-16T11:36:45,614 DEBUG [M:0;a7948fca2832:32837 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/188f2f87b3224711928a5ddf2eddbadf as hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/188f2f87b3224711928a5ddf2eddbadf 2024-11-16T11:36:45,620 INFO [M:0;a7948fca2832:32837 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/188f2f87b3224711928a5ddf2eddbadf, entries=6, sequenceid=56, filesize=6.0 K 2024-11-16T11:36:45,621 DEBUG [M:0;a7948fca2832:32837 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d0db064f8f9a46cc948e703ea3b0dca0 as hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d0db064f8f9a46cc948e703ea3b0dca0 2024-11-16T11:36:45,627 INFO [M:0;a7948fca2832:32837 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d0db064f8f9a46cc948e703ea3b0dca0, entries=1, sequenceid=56, filesize=5.0 K 2024-11-16T11:36:45,628 DEBUG [M:0;a7948fca2832:32837 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0bc57a59470c40e1963a91404ead4490 as hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0bc57a59470c40e1963a91404ead4490 2024-11-16T11:36:45,633 INFO [M:0;a7948fca2832:32837 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0bc57a59470c40e1963a91404ead4490, entries=1, sequenceid=56, filesize=4.9 K 2024-11-16T11:36:45,634 INFO [M:0;a7948fca2832:32837 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=56, compaction requested=false 2024-11-16T11:36:45,636 INFO [M:0;a7948fca2832:32837 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:36:45,636 DEBUG [M:0;a7948fca2832:32837 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731757005487Disabling compacts and flushes for region at 1731757005487Disabling writes for close at 1731757005487Obtaining lock to block concurrent updates at 1731757005487Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731757005487Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731757005488 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731757005495 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731757005495Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731757005516 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731757005516Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731757005528 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731757005545 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731757005545Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731757005556 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731757005571 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731757005572 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731757005583 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731757005597 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731757005597Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56caa7d1: reopening flushed file at 1731757005607 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ee5d5b5: reopening flushed file at 1731757005613 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@53094580: reopening flushed file at 1731757005620 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cba249f: reopening flushed file at 1731757005627 (+7 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=56, compaction requested=false at 1731757005634 (+7 ms)Writing region close event to WAL at 1731757005635 (+1 ms)Closed at 1731757005635 2024-11-16T11:36:45,636 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:45,636 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:45,636 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:45,636 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:45,636 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:36:45,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38713 is added to blk_1073741847_1031 (size=757) 2024-11-16T11:36:45,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43041 is added to blk_1073741847_1031 (size=757) 2024-11-16T11:36:46,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:46,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:46,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:46,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:46,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:46,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:46,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:46,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:46,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:46,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:46,646 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:46,646 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:46,646 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:46,648 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:46,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:46,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:46,933 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T11:36:47,156 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T11:36:47,158 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:47,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:47,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:47,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:47,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:47,185 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:47,186 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:47,186 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:47,186 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:47,186 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:47,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:47,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:47,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:47,194 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:47,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:36:47,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:48,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:48,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:36:49,495 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/WALs/a7948fca2832,32837,1731756974859/a7948fca2832%2C32837%2C1731756974859.1731756975175 after 4001ms 2024-11-16T11:36:49,496 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/WALs/a7948fca2832,32837,1731756974859/a7948fca2832%2C32837%2C1731756974859.1731756975175 to hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/oldWALs/a7948fca2832%2C32837%2C1731756974859.1731756975175 2024-11-16T11:36:49,500 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/MasterData/oldWALs/a7948fca2832%2C32837%2C1731756974859.1731756975175 to hdfs://localhost:45425/user/jenkins/test-data/133f35ed-3fdd-80df-fbbe-16b40c215545/oldWALs/a7948fca2832%2C32837%2C1731756974859.1731756975175$masterlocalwal$ 2024-11-16T11:36:49,501 INFO [M:0;a7948fca2832:32837 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T11:36:49,501 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T11:36:49,501 INFO [M:0;a7948fca2832:32837 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32837 2024-11-16T11:36:49,501 INFO [M:0;a7948fca2832:32837 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T11:36:49,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:49,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:36:49,654 INFO [M:0;a7948fca2832:32837 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T11:36:49,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:36:49,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32837-0x101436e5ea50000, quorum=127.0.0.1:62036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:36:49,656 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d60d3c4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:49,657 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41b83c1e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:36:49,657 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:36:49,657 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64aa4bc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:36:49,657 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d1eba33{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir/,STOPPED} 2024-11-16T11:36:49,658 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T11:36:49,658 WARN [BP-2106001367-172.17.0.2-1731756972475 heartbeating to localhost/127.0.0.1:45425 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:36:49,659 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:36:49,659 WARN [BP-2106001367-172.17.0.2-1731756972475 heartbeating to localhost/127.0.0.1:45425 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2106001367-172.17.0.2-1731756972475 (Datanode Uuid 9b5b90c9-dcac-408d-8e33-6dedbaa4afd8) service to localhost/127.0.0.1:45425 2024-11-16T11:36:49,659 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data3/current/BP-2106001367-172.17.0.2-1731756972475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:49,660 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data4/current/BP-2106001367-172.17.0.2-1731756972475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:49,660 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:36:49,662 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4f2608b2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:49,662 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1368c96a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:36:49,662 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:36:49,663 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1cdbb868{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:36:49,663 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@546b6142{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir/,STOPPED} 2024-11-16T11:36:49,663 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T11:36:49,663 WARN [BP-2106001367-172.17.0.2-1731756972475 heartbeating to localhost/127.0.0.1:45425 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:36:49,664 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:36:49,664 WARN [BP-2106001367-172.17.0.2-1731756972475 heartbeating to localhost/127.0.0.1:45425 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2106001367-172.17.0.2-1731756972475 (Datanode Uuid 287914b4-629f-429f-b417-8fb75fb89957) service to localhost/127.0.0.1:45425 2024-11-16T11:36:49,664 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data1/current/BP-2106001367-172.17.0.2-1731756972475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:49,665 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/cluster_bd522303-66c0-1bc2-1561-87a1b18cd5aa/data/data2/current/BP-2106001367-172.17.0.2-1731756972475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:36:49,665 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:36:49,671 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5910f3cc{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T11:36:49,671 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@656d64e4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:36:49,671 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:36:49,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ca621c9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:36:49,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c9126e8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir/,STOPPED} 2024-11-16T11:36:49,678 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T11:36:49,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T11:36:49,704 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45425 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45425 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45425 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:45425 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45425 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:45425 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:45425 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:45425 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=250 (was 239) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3792 (was 4005) 2024-11-16T11:36:49,711 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=250, ProcessCount=11, AvailableMemoryMB=3792 2024-11-16T11:36:49,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T11:36:49,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.log.dir so I do NOT create it in target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57 2024-11-16T11:36:49,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/975dbba1-f434-5b9d-3fec-0a1f76f44ca8/hadoop.tmp.dir so I do NOT create it in target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57 2024-11-16T11:36:49,712 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/cluster_5c6bb902-580c-7056-c178-10469046893e, deleteOnExit=true 2024-11-16T11:36:49,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T11:36:49,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/test.cache.data in system properties and HBase conf 2024-11-16T11:36:49,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T11:36:49,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/hadoop.log.dir in system properties and HBase conf 2024-11-16T11:36:49,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T11:36:49,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T11:36:49,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T11:36:49,712 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T11:36:49,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T11:36:49,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T11:36:49,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T11:36:49,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T11:36:49,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T11:36:49,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T11:36:49,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T11:36:49,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T11:36:49,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T11:36:49,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/nfs.dump.dir in system properties and HBase conf 2024-11-16T11:36:49,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/java.io.tmpdir in system properties and HBase conf 2024-11-16T11:36:49,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T11:36:49,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T11:36:49,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T11:36:49,727 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T11:36:50,222 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:36:50,227 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:36:50,228 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:36:50,228 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:36:50,228 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:36:50,229 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:36:50,229 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@558e6a7f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:36:50,230 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20104ad5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:36:50,334 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7528c100{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/java.io.tmpdir/jetty-localhost-46849-hadoop-hdfs-3_4_1-tests_jar-_-any-6660524944370305878/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T11:36:50,335 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20675422{HTTP/1.1, (http/1.1)}{localhost:46849} 2024-11-16T11:36:50,335 INFO [Time-limited test {}] server.Server(415): Started @195557ms 2024-11-16T11:36:50,347 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T11:36:50,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:50,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:50,674 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:36:50,677 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:36:50,677 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:36:50,677 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:36:50,677 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T11:36:50,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e47a0e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:36:50,681 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21e05b91{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:36:50,784 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35783ebf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/java.io.tmpdir/jetty-localhost-40973-hadoop-hdfs-3_4_1-tests_jar-_-any-10464281295053141247/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:50,785 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5acc7ef{HTTP/1.1, (http/1.1)}{localhost:40973} 2024-11-16T11:36:50,785 INFO [Time-limited test {}] server.Server(415): Started @196007ms 2024-11-16T11:36:50,786 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:36:50,817 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:36:50,819 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:36:50,820 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:36:50,820 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:36:50,820 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:36:50,820 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f07a5bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:36:50,821 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fcb7d58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:36:50,927 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f25864e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/java.io.tmpdir/jetty-localhost-45187-hadoop-hdfs-3_4_1-tests_jar-_-any-17613059791171790289/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:36:50,927 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@117a5d75{HTTP/1.1, (http/1.1)}{localhost:45187} 2024-11-16T11:36:50,927 INFO [Time-limited test {}] server.Server(415): Started @196149ms 2024-11-16T11:36:50,928 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:36:51,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T11:36:51,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T11:36:51,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T11:36:51,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T11:36:51,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:51,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:52,031 WARN [Thread-1655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/cluster_5c6bb902-580c-7056-c178-10469046893e/data/data1/current/BP-161704216-172.17.0.2-1731757009739/current, will proceed with Du for space computation calculation, 2024-11-16T11:36:52,031 WARN [Thread-1656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/cluster_5c6bb902-580c-7056-c178-10469046893e/data/data2/current/BP-161704216-172.17.0.2-1731757009739/current, will proceed with Du for space computation calculation, 2024-11-16T11:36:52,061 WARN [Thread-1619 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T11:36:52,063 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6cf25a9a2426ca25 with lease ID 0x8a490ca1151b52ca: Processing first storage report for DS-acdfd643-3b8d-4b7d-ad9d-d2648605415d from datanode DatanodeRegistration(127.0.0.1:35797, datanodeUuid=7ba2dbc6-adad-46e9-a509-620c83b55fd6, infoPort=39223, infoSecurePort=0, ipcPort=41783, storageInfo=lv=-57;cid=testClusterID;nsid=1841144443;c=1731757009739) 2024-11-16T11:36:52,063 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6cf25a9a2426ca25 with lease ID 0x8a490ca1151b52ca: from storage DS-acdfd643-3b8d-4b7d-ad9d-d2648605415d node DatanodeRegistration(127.0.0.1:35797, datanodeUuid=7ba2dbc6-adad-46e9-a509-620c83b55fd6, infoPort=39223, infoSecurePort=0, ipcPort=41783, storageInfo=lv=-57;cid=testClusterID;nsid=1841144443;c=1731757009739), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:36:52,063 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6cf25a9a2426ca25 with lease ID 0x8a490ca1151b52ca: Processing first storage report for DS-e4f2cfd9-5c30-4a1f-a3fb-de035d23fb1f from datanode DatanodeRegistration(127.0.0.1:35797, datanodeUuid=7ba2dbc6-adad-46e9-a509-620c83b55fd6, infoPort=39223, infoSecurePort=0, ipcPort=41783, storageInfo=lv=-57;cid=testClusterID;nsid=1841144443;c=1731757009739) 2024-11-16T11:36:52,064 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6cf25a9a2426ca25 with lease ID 0x8a490ca1151b52ca: from storage DS-e4f2cfd9-5c30-4a1f-a3fb-de035d23fb1f node DatanodeRegistration(127.0.0.1:35797, datanodeUuid=7ba2dbc6-adad-46e9-a509-620c83b55fd6, infoPort=39223, infoSecurePort=0, ipcPort=41783, storageInfo=lv=-57;cid=testClusterID;nsid=1841144443;c=1731757009739), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 
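[Editorial sketch, not part of the captured log] The entries before and after this point show HBaseTestingUtil tearing down the minicluster from the previous test and bringing up a fresh MiniZooKeeperCluster, MiniDFSCluster (two datanodes registering their storages above and below), master and regionserver for testCompactionRecordDoesntBlockRolling. A hedged sketch of the test-harness calls that produce this kind of output, assuming the HBase 3.x test API (HBaseTestingUtil, StartMiniClusterOption); exact signatures may differ between branches:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterLifecycle {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        // Brings up MiniZK, MiniDFS and the HBase master/regionserver,
        // producing startup logs like the surrounding entries.
        util.startMiniCluster(option);
        try {
          // ... run test logic against util.getConnection() ...
        } finally {
          // Emits the "Minicluster is down" style teardown logs seen earlier.
          util.shutdownMiniCluster();
        }
      }
    }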
2024-11-16T11:36:52,159 WARN [Thread-1666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/cluster_5c6bb902-580c-7056-c178-10469046893e/data/data3/current/BP-161704216-172.17.0.2-1731757009739/current, will proceed with Du for space computation calculation, 2024-11-16T11:36:52,159 WARN [Thread-1667 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/cluster_5c6bb902-580c-7056-c178-10469046893e/data/data4/current/BP-161704216-172.17.0.2-1731757009739/current, will proceed with Du for space computation calculation, 2024-11-16T11:36:52,178 WARN [Thread-1642 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T11:36:52,180 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3dc2b4e002444517 with lease ID 0x8a490ca1151b52cb: Processing first storage report for DS-c686bd6f-9cd0-46d6-af7d-a97478bb297b from datanode DatanodeRegistration(127.0.0.1:40361, datanodeUuid=342acf04-c7ac-413c-9876-4a722480f6f5, infoPort=38203, infoSecurePort=0, ipcPort=40073, storageInfo=lv=-57;cid=testClusterID;nsid=1841144443;c=1731757009739) 2024-11-16T11:36:52,180 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3dc2b4e002444517 with lease ID 0x8a490ca1151b52cb: from storage DS-c686bd6f-9cd0-46d6-af7d-a97478bb297b node DatanodeRegistration(127.0.0.1:40361, datanodeUuid=342acf04-c7ac-413c-9876-4a722480f6f5, infoPort=38203, infoSecurePort=0, ipcPort=40073, storageInfo=lv=-57;cid=testClusterID;nsid=1841144443;c=1731757009739), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:36:52,180 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3dc2b4e002444517 with lease ID 0x8a490ca1151b52cb: Processing first storage report for DS-93f7c514-3b47-4176-8537-666474f64343 from datanode DatanodeRegistration(127.0.0.1:40361, datanodeUuid=342acf04-c7ac-413c-9876-4a722480f6f5, infoPort=38203, infoSecurePort=0, ipcPort=40073, storageInfo=lv=-57;cid=testClusterID;nsid=1841144443;c=1731757009739) 2024-11-16T11:36:52,180 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3dc2b4e002444517 with lease ID 0x8a490ca1151b52cb: from storage DS-93f7c514-3b47-4176-8537-666474f64343 node DatanodeRegistration(127.0.0.1:40361, datanodeUuid=342acf04-c7ac-413c-9876-4a722480f6f5, infoPort=38203, infoSecurePort=0, ipcPort=40073, storageInfo=lv=-57;cid=testClusterID;nsid=1841144443;c=1731757009739), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:36:52,259 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57 2024-11-16T11:36:52,262 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/cluster_5c6bb902-580c-7056-c178-10469046893e/zookeeper_0, clientPort=63364, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/cluster_5c6bb902-580c-7056-c178-10469046893e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/cluster_5c6bb902-580c-7056-c178-10469046893e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T11:36:52,262 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63364 2024-11-16T11:36:52,263 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:52,264 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:52,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741825_1001 (size=7) 2024-11-16T11:36:52,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741825_1001 (size=7) 2024-11-16T11:36:52,275 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467 with version=8 2024-11-16T11:36:52,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/hbase-staging 2024-11-16T11:36:52,277 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:36:52,277 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:36:52,277 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:36:52,277 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:36:52,277 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:36:52,277 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:36:52,277 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T11:36:52,277 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:36:52,278 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43583 2024-11-16T11:36:52,280 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43583 connecting to ZooKeeper ensemble=127.0.0.1:63364 2024-11-16T11:36:52,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:435830x0, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T11:36:52,327 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43583-0x101436ef0d00000 connected 2024-11-16T11:36:52,411 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:52,413 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:52,417 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:36:52,417 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467, hbase.cluster.distributed=false 2024-11-16T11:36:52,419 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:36:52,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43583 2024-11-16T11:36:52,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43583 2024-11-16T11:36:52,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43583 2024-11-16T11:36:52,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43583 2024-11-16T11:36:52,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43583 2024-11-16T11:36:52,445 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:36:52,445 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:36:52,445 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:36:52,445 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:36:52,445 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:36:52,445 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:36:52,445 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T11:36:52,445 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:36:52,446 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33179 2024-11-16T11:36:52,448 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33179 connecting to ZooKeeper ensemble=127.0.0.1:63364 2024-11-16T11:36:52,449 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:52,451 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:52,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:331790x0, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T11:36:52,464 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:331790x0, quorum=127.0.0.1:63364, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:36:52,464 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33179-0x101436ef0d00001 connected 2024-11-16T11:36:52,464 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T11:36:52,465 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T11:36:52,465 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T11:36:52,466 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:36:52,467 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33179 2024-11-16T11:36:52,467 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33179 2024-11-16T11:36:52,467 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33179 2024-11-16T11:36:52,467 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33179 2024-11-16T11:36:52,467 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33179 2024-11-16T11:36:52,479 
DEBUG [M:0;a7948fca2832:43583 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a7948fca2832:43583 2024-11-16T11:36:52,479 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a7948fca2832,43583,1731757012277 2024-11-16T11:36:52,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:36:52,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:36:52,491 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a7948fca2832,43583,1731757012277 2024-11-16T11:36:52,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T11:36:52,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:52,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:52,501 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T11:36:52,502 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a7948fca2832,43583,1731757012277 from backup master directory 2024-11-16T11:36:52,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a7948fca2832,43583,1731757012277 2024-11-16T11:36:52,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:36:52,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:36:52,511 WARN [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T11:36:52,511 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a7948fca2832,43583,1731757012277 2024-11-16T11:36:52,516 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/hbase.id] with ID: 8da68809-3bba-4e93-a2c4-13d8f0b0bc31 2024-11-16T11:36:52,516 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/.tmp/hbase.id 2024-11-16T11:36:52,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741826_1002 (size=42) 2024-11-16T11:36:52,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741826_1002 (size=42) 2024-11-16T11:36:52,524 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/.tmp/hbase.id]:[hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/hbase.id] 2024-11-16T11:36:52,536 INFO [master/a7948fca2832:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:52,536 INFO [master/a7948fca2832:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T11:36:52,537 INFO [master/a7948fca2832:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-16T11:36:52,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:52,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:52,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741827_1003 (size=196) 2024-11-16T11:36:52,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741827_1003 (size=196) 2024-11-16T11:36:52,556 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T11:36:52,557 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T11:36:52,557 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:36:52,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741828_1004 (size=1189) 2024-11-16T11:36:52,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741828_1004 (size=1189) 2024-11-16T11:36:52,566 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store 2024-11-16T11:36:52,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:36:52,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:36:52,573 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:36:52,573 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T11:36:52,573 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:36:52,573 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:36:52,573 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T11:36:52,573 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:36:52,573 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T11:36:52,573 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731757012573Disabling compacts and flushes for region at 1731757012573Disabling writes for close at 1731757012573Writing region close event to WAL at 1731757012573Closed at 1731757012573 2024-11-16T11:36:52,574 WARN [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/.initializing 2024-11-16T11:36:52,574 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/WALs/a7948fca2832,43583,1731757012277 2024-11-16T11:36:52,577 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C43583%2C1731757012277, suffix=, logDir=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/WALs/a7948fca2832,43583,1731757012277, archiveDir=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/oldWALs, maxLogs=10 2024-11-16T11:36:52,577 INFO [master/a7948fca2832:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C43583%2C1731757012277.1731757012577 2024-11-16T11:36:52,586 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/WALs/a7948fca2832,43583,1731757012277/a7948fca2832%2C43583%2C1731757012277.1731757012577 2024-11-16T11:36:52,586 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38203:38203),(127.0.0.1/127.0.0.1:39223:39223)] 2024-11-16T11:36:52,587 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:36:52,587 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:36:52,587 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:52,588 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:52,589 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:52,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	...
11 more 2024-11-16T11:36:52,590 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T11:36:52,590 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:52,591 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:52,591 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:52,592 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T11:36:52,592 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:52,592 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:36:52,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:52,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T11:36:52,594 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:52,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:36:52,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:52,595 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T11:36:52,595 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:52,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:36:52,596 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:52,597 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:52,597 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:52,598 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:52,599 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:52,599 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T11:36:52,600 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:36:52,602 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:36:52,603 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=864450, jitterRate=0.09920619428157806}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T11:36:52,603 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731757012588Initializing all the Stores at 1731757012588Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757012588Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757012589 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757012589Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757012589Cleaning up temporary data from old regions at 1731757012599 (+10 ms)Region opened successfully at 1731757012603 (+4 ms) 2024-11-16T11:36:52,603 INFO [master/a7948fca2832:0:becomeActiveMaster {}] 
region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T11:36:52,606 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f4a532a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:36:52,607 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T11:36:52,607 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T11:36:52,607 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T11:36:52,607 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T11:36:52,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	...
11 more 2024-11-16T11:36:52,608 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T11:36:52,608 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T11:36:52,608 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T11:36:52,611 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T11:36:52,611 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T11:36:52,621 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T11:36:52,622 INFO [master/a7948fca2832:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T11:36:52,622 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T11:36:52,632 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T11:36:52,632 INFO [master/a7948fca2832:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T11:36:52,634 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T11:36:52,642 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T11:36:52,643 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T11:36:52,653 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T11:36:52,656 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T11:36:52,663 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T11:36:52,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/running 2024-11-16T11:36:52,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T11:36:52,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:52,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:52,674 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a7948fca2832,43583,1731757012277, sessionid=0x101436ef0d00000, setting cluster-up flag (Was=false) 2024-11-16T11:36:52,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:52,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:52,727 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T11:36:52,728 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7948fca2832,43583,1731757012277 2024-11-16T11:36:52,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:52,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:52,779 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T11:36:52,780 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7948fca2832,43583,1731757012277 2024-11-16T11:36:52,781 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T11:36:52,783 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T11:36:52,783 INFO [master/a7948fca2832:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T11:36:52,783 INFO 
[master/a7948fca2832:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T11:36:52,783 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a7948fca2832,43583,1731757012277 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T11:36:52,784 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:36:52,784 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:36:52,784 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:36:52,784 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:36:52,785 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a7948fca2832:0, corePoolSize=10, maxPoolSize=10 2024-11-16T11:36:52,785 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:52,785 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:36:52,785 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:52,786 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731757042786 2024-11-16T11:36:52,786 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T11:36:52,786 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T11:36:52,786 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 
2024-11-16T11:36:52,786 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T11:36:52,786 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T11:36:52,786 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:36:52,786 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T11:36:52,786 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T11:36:52,787 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:52,787 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T11:36:52,787 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T11:36:52,787 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T11:36:52,787 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T11:36:52,787 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T11:36:52,787 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731757012787,5,FailOnTimeoutGroup] 2024-11-16T11:36:52,787 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:52,788 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731757012787,5,FailOnTimeoutGroup] 2024-11-16T11:36:52,788 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:52,788 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T11:36:52,788 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:52,788 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-16T11:36:52,788 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T11:36:52,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:36:52,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:36:52,797 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T11:36:52,797 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467 2024-11-16T11:36:52,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741832_1008 (size=32) 2024-11-16T11:36:52,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741832_1008 (size=32) 2024-11-16T11:36:52,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:36:52,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T11:36:52,808 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T11:36:52,808 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:52,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:52,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T11:36:52,810 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T11:36:52,810 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:52,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:52,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T11:36:52,812 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T11:36:52,812 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:52,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:52,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T11:36:52,814 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T11:36:52,814 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:52,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:52,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T11:36:52,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740 2024-11-16T11:36:52,816 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740 2024-11-16T11:36:52,817 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T11:36:52,817 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T11:36:52,818 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T11:36:52,819 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T11:36:52,821 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:36:52,821 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=867445, jitterRate=0.1030135303735733}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T11:36:52,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731757012806Initializing all the Stores at 1731757012806Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757012806Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757012807 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757012807Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757012807Cleaning up temporary data from old regions at 1731757012817 (+10 ms)Region opened successfully at 1731757012822 (+5 ms) 2024-11-16T11:36:52,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T11:36:52,822 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T11:36:52,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T11:36:52,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T11:36:52,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T11:36:52,823 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T11:36:52,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731757012822Disabling compacts and flushes for region at 1731757012822Disabling writes for close at 1731757012822Writing region close event to WAL at 1731757012823 (+1 ms)Closed at 1731757012823 2024-11-16T11:36:52,824 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:36:52,824 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T11:36:52,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T11:36:52,826 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T11:36:52,827 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T11:36:52,869 INFO [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(746): ClusterId : 8da68809-3bba-4e93-a2c4-13d8f0b0bc31 2024-11-16T11:36:52,869 DEBUG [RS:0;a7948fca2832:33179 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T11:36:52,880 DEBUG [RS:0;a7948fca2832:33179 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T11:36:52,880 DEBUG [RS:0;a7948fca2832:33179 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T11:36:52,891 DEBUG [RS:0;a7948fca2832:33179 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T11:36:52,891 DEBUG [RS:0;a7948fca2832:33179 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2498a287, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:36:52,903 DEBUG [RS:0;a7948fca2832:33179 {}] 
regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a7948fca2832:33179 2024-11-16T11:36:52,903 INFO [RS:0;a7948fca2832:33179 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T11:36:52,903 INFO [RS:0;a7948fca2832:33179 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T11:36:52,903 DEBUG [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T11:36:52,904 INFO [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7948fca2832,43583,1731757012277 with port=33179, startcode=1731757012444 2024-11-16T11:36:52,904 DEBUG [RS:0;a7948fca2832:33179 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T11:36:52,906 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47305, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T11:36:52,906 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43583 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7948fca2832,33179,1731757012444 2024-11-16T11:36:52,906 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43583 {}] master.ServerManager(517): Registering regionserver=a7948fca2832,33179,1731757012444 2024-11-16T11:36:52,908 DEBUG [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467 2024-11-16T11:36:52,908 DEBUG [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33487 2024-11-16T11:36:52,908 DEBUG [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T11:36:52,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:36:52,917 DEBUG [RS:0;a7948fca2832:33179 {}] zookeeper.ZKUtil(111): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7948fca2832,33179,1731757012444 2024-11-16T11:36:52,917 WARN [RS:0;a7948fca2832:33179 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
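The hbase:meta descriptor logged above (families info, ns, rep_barrier and table, each with ROWCOL bloom filters, ROW_INDEX_V1 encoding, in-memory caching and the MultiRowMutationEndpoint coprocessor) can be expressed with the public client API. The following is an illustrative Java sketch only; it is not the code path the test takes (the test writes the descriptor through FSTableDescriptors, as logged), and the table and class names are placeholders:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() throws IOException {
    // Mirrors the attributes the log prints for the 'info' family of hbase:meta.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
        .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
        .build();
    // The ns, rep_barrier and table families in the log follow the same pattern.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("exampleMetaLike")) // placeholder name
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .setColumnFamily(info)
        .build();
  }
}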
2024-11-16T11:36:52,917 INFO [RS:0;a7948fca2832:33179 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:36:52,917 DEBUG [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444 2024-11-16T11:36:52,917 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7948fca2832,33179,1731757012444] 2024-11-16T11:36:52,920 INFO [RS:0;a7948fca2832:33179 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T11:36:52,922 INFO [RS:0;a7948fca2832:33179 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T11:36:52,922 INFO [RS:0;a7948fca2832:33179 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T11:36:52,922 INFO [RS:0;a7948fca2832:33179 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:52,922 INFO [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T11:36:52,923 INFO [RS:0;a7948fca2832:33179 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T11:36:52,923 INFO [RS:0;a7948fca2832:33179 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:52,923 DEBUG [RS:0;a7948fca2832:33179 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:52,923 DEBUG [RS:0;a7948fca2832:33179 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:52,923 DEBUG [RS:0;a7948fca2832:33179 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:52,924 DEBUG [RS:0;a7948fca2832:33179 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:52,924 DEBUG [RS:0;a7948fca2832:33179 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:52,924 DEBUG [RS:0;a7948fca2832:33179 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:36:52,924 DEBUG [RS:0;a7948fca2832:33179 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:52,924 DEBUG [RS:0;a7948fca2832:33179 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:52,924 DEBUG [RS:0;a7948fca2832:33179 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7948fca2832:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T11:36:52,924 DEBUG [RS:0;a7948fca2832:33179 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:52,924 DEBUG [RS:0;a7948fca2832:33179 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:52,924 DEBUG [RS:0;a7948fca2832:33179 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:36:52,924 DEBUG [RS:0;a7948fca2832:33179 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:36:52,924 DEBUG [RS:0;a7948fca2832:33179 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:36:52,925 INFO [RS:0;a7948fca2832:33179 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:52,925 INFO [RS:0;a7948fca2832:33179 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:52,925 INFO [RS:0;a7948fca2832:33179 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:52,925 INFO [RS:0;a7948fca2832:33179 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:52,925 INFO [RS:0;a7948fca2832:33179 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:52,925 INFO [RS:0;a7948fca2832:33179 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,33179,1731757012444-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T11:36:52,941 INFO [RS:0;a7948fca2832:33179 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T11:36:52,941 INFO [RS:0;a7948fca2832:33179 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,33179,1731757012444-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:52,941 INFO [RS:0;a7948fca2832:33179 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:52,941 INFO [RS:0;a7948fca2832:33179 {}] regionserver.Replication(171): a7948fca2832,33179,1731757012444 started 2024-11-16T11:36:52,956 INFO [RS:0;a7948fca2832:33179 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T11:36:52,957 INFO [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(1482): Serving as a7948fca2832,33179,1731757012444, RpcServer on a7948fca2832/172.17.0.2:33179, sessionid=0x101436ef0d00001 2024-11-16T11:36:52,957 DEBUG [RS:0;a7948fca2832:33179 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T11:36:52,957 DEBUG [RS:0;a7948fca2832:33179 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7948fca2832,33179,1731757012444 2024-11-16T11:36:52,957 DEBUG [RS:0;a7948fca2832:33179 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,33179,1731757012444' 2024-11-16T11:36:52,957 DEBUG [RS:0;a7948fca2832:33179 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T11:36:52,957 DEBUG [RS:0;a7948fca2832:33179 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T11:36:52,958 DEBUG [RS:0;a7948fca2832:33179 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T11:36:52,958 DEBUG [RS:0;a7948fca2832:33179 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T11:36:52,958 DEBUG [RS:0;a7948fca2832:33179 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a7948fca2832,33179,1731757012444 2024-11-16T11:36:52,958 DEBUG [RS:0;a7948fca2832:33179 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,33179,1731757012444' 2024-11-16T11:36:52,958 DEBUG [RS:0;a7948fca2832:33179 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T11:36:52,958 DEBUG [RS:0;a7948fca2832:33179 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T11:36:52,959 DEBUG [RS:0;a7948fca2832:33179 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T11:36:52,959 INFO [RS:0;a7948fca2832:33179 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T11:36:52,959 INFO [RS:0;a7948fca2832:33179 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T11:36:52,977 WARN [a7948fca2832:43583 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
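At this point the assignment manager has nothing to assign to ("No servers available"); a few entries later, once a7948fca2832,33179,1731757012444 has registered, TransitRegionStateProcedure places hbase:meta there and publishes the location under /hbase/meta-region-server. A client can read the resulting location through RegionLocator; a minimal sketch, assuming an hbase-site.xml pointing at the cluster is on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes cluster config on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // hbase:meta has a single region covering the whole key space, so the empty row suffices.
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("hbase:meta is on " + loc.getServerName()); // e.g. a7948fca2832,33179,...
    }
  }
}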
2024-11-16T11:36:53,060 INFO [RS:0;a7948fca2832:33179 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C33179%2C1731757012444, suffix=, logDir=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444, archiveDir=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/oldWALs, maxLogs=32 2024-11-16T11:36:53,061 INFO [RS:0;a7948fca2832:33179 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C33179%2C1731757012444.1731757013061 2024-11-16T11:36:53,066 INFO [RS:0;a7948fca2832:33179 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444/a7948fca2832%2C33179%2C1731757012444.1731757013061 2024-11-16T11:36:53,068 DEBUG [RS:0;a7948fca2832:33179 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39223:39223),(127.0.0.1/127.0.0.1:38203:38203)] 2024-11-16T11:36:53,227 DEBUG [a7948fca2832:43583 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T11:36:53,228 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a7948fca2832,33179,1731757012444 2024-11-16T11:36:53,230 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7948fca2832,33179,1731757012444, state=OPENING 2024-11-16T11:36:53,242 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T11:36:53,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:53,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:36:53,254 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T11:36:53,254 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a7948fca2832,33179,1731757012444}] 2024-11-16T11:36:53,254 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:36:53,254 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:36:53,407 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T11:36:53,410 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33707, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T11:36:53,415 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T11:36:53,415 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:36:53,418 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C33179%2C1731757012444.meta, suffix=.meta, logDir=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444, archiveDir=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/oldWALs, maxLogs=32 2024-11-16T11:36:53,419 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C33179%2C1731757012444.meta.1731757013419.meta 2024-11-16T11:36:53,425 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444/a7948fca2832%2C33179%2C1731757012444.meta.1731757013419.meta 2024-11-16T11:36:53,427 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38203:38203),(127.0.0.1/127.0.0.1:39223:39223)] 2024-11-16T11:36:53,427 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:36:53,428 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T11:36:53,428 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T11:36:53,428 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
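The WAL configuration printed above (FSHLogProvider, blocksize=256 MB, rollsize=128 MB, maxLogs=32) is driven by a handful of standard settings. The sketch below shows the keys as I understand them (hbase.wal.provider, hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs); the values simply mirror what the log reports, and the key names should be checked against the hbase-default.xml of the release in use:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static Configuration walTuning() {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects FSHLogProvider, the provider named in the log above.
    conf.set("hbase.wal.provider", "filesystem");
    // blocksize=256 MB with a 0.5 roll multiplier gives the logged rollsize of 128 MB.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // maxLogs=32 caps the number of un-archived WAL files before a forced flush/roll.
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}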
2024-11-16T11:36:53,428 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T11:36:53,428 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:36:53,428 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T11:36:53,428 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T11:36:53,430 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T11:36:53,430 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T11:36:53,431 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:53,431 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:53,431 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T11:36:53,432 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T11:36:53,432 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:53,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:53,433 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T11:36:53,433 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T11:36:53,433 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:53,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:36:53,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T11:36:53,435 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T11:36:53,435 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:53,435 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
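Each store opened above prints the same CompactionConfiguration line (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, minCompactSize:128 MB). These correspond to the hbase.hstore.compaction.* settings; an illustrative sketch with values copied straight from the log, not a tuning recommendation:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration compactionTuning() {
    Configuration conf = HBaseConfiguration.create();
    // files [minFilesToCompact:3, maxFilesToCompact:10) in the log
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // ratio 1.200000 and off-peak ratio 5.000000 in the log
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // size [minCompactSize:128 MB, ...) in the log
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    return conf;
  }
}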
2024-11-16T11:36:53,435 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T11:36:53,436 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740 2024-11-16T11:36:53,437 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740 2024-11-16T11:36:53,438 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T11:36:53,438 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T11:36:53,438 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T11:36:53,439 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T11:36:53,440 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=864194, jitterRate=0.09888052940368652}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T11:36:53,440 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T11:36:53,441 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731757013428Writing region info on filesystem at 1731757013428Initializing all the Stores at 1731757013429 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757013429Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757013429Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757013429Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757013429Cleaning up temporary data from old regions at 1731757013438 (+9 ms)Running coprocessor post-open hooks at 1731757013440 (+2 ms)Region opened successfully at 1731757013440 2024-11-16T11:36:53,441 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731757013407 2024-11-16T11:36:53,444 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T11:36:53,444 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T11:36:53,445 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a7948fca2832,33179,1731757012444 2024-11-16T11:36:53,446 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7948fca2832,33179,1731757012444, state=OPEN 2024-11-16T11:36:53,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T11:36:53,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T11:36:53,485 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a7948fca2832,33179,1731757012444 2024-11-16T11:36:53,485 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:36:53,485 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:36:53,489 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T11:36:53,489 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a7948fca2832,33179,1731757012444 in 231 msec 2024-11-16T11:36:53,492 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T11:36:53,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 665 msec 2024-11-16T11:36:53,494 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:36:53,494 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T11:36:53,495 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T11:36:53,495 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7948fca2832,33179,1731757012444, seqNum=-1] 2024-11-16T11:36:53,495 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T11:36:53,497 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34649, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T11:36:53,502 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 720 msec 2024-11-16T11:36:53,502 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731757013502, completionTime=-1 2024-11-16T11:36:53,502 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T11:36:53,503 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T11:36:53,504 INFO [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T11:36:53,505 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731757073505 2024-11-16T11:36:53,505 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731757133505 2024-11-16T11:36:53,505 INFO [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T11:36:53,505 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,43583,1731757012277-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:53,505 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,43583,1731757012277-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:53,505 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,43583,1731757012277-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:53,505 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a7948fca2832:43583, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T11:36:53,505 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:53,505 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T11:36:53,507 DEBUG [master/a7948fca2832:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T11:36:53,509 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.998sec 2024-11-16T11:36:53,509 INFO [master/a7948fca2832:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T11:36:53,509 INFO [master/a7948fca2832:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T11:36:53,509 INFO [master/a7948fca2832:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T11:36:53,509 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T11:36:53,509 INFO [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T11:36:53,509 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,43583,1731757012277-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T11:36:53,509 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,43583,1731757012277-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T11:36:53,511 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T11:36:53,511 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T11:36:53,511 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,43583,1731757012277-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T11:36:53,570 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e0f54ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:36:53,570 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a7948fca2832,43583,-1 for getting cluster id 2024-11-16T11:36:53,570 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T11:36:53,572 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8da68809-3bba-4e93-a2c4-13d8f0b0bc31' 2024-11-16T11:36:53,573 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T11:36:53,573 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8da68809-3bba-4e93-a2c4-13d8f0b0bc31" 2024-11-16T11:36:53,573 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18f6058e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:36:53,573 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a7948fca2832,43583,-1] 2024-11-16T11:36:53,574 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T11:36:53,574 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:36:53,575 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46570, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T11:36:53,576 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3365df86, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:36:53,577 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T11:36:53,578 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7948fca2832,33179,1731757012444, seqNum=-1] 2024-11-16T11:36:53,578 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T11:36:53,579 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51712, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T11:36:53,581 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a7948fca2832,43583,1731757012277 2024-11-16T11:36:53,581 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:36:53,584 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T11:36:53,585 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T11:36:53,586 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is a7948fca2832,43583,1731757012277 2024-11-16T11:36:53,586 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@13640fbc 2024-11-16T11:36:53,586 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T11:36:53,587 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46586, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T11:36:53,588 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T11:36:53,588 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-16T11:36:53,588 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T11:36:53,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:53,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T11:36:53,591 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T11:36:53,591 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:53,591 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-16T11:36:53,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T11:36:53,592 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T11:36:53,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741835_1011 (size=405) 2024-11-16T11:36:53,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741835_1011 (size=405) 2024-11-16T11:36:53,605 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => dd0873104aa8588885420bd54efd8f72, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
regionDir=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467 2024-11-16T11:36:53,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:36:53,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741836_1012 (size=88) 2024-11-16T11:36:53,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741836_1012 (size=88) 2024-11-16T11:36:53,611 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:36:53,612 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing dd0873104aa8588885420bd54efd8f72, disabling compactions & flushes 2024-11-16T11:36:53,612 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. 2024-11-16T11:36:53,612 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. 2024-11-16T11:36:53,612 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. after waiting 0 ms 2024-11-16T11:36:53,612 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. 2024-11-16T11:36:53,612 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. 2024-11-16T11:36:53,612 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for dd0873104aa8588885420bd54efd8f72: Waiting for close lock at 1731757013612Disabling compacts and flushes for region at 1731757013612Disabling writes for close at 1731757013612Writing region close event to WAL at 1731757013612Closed at 1731757013612 2024-11-16T11:36:53,613 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T11:36:53,613 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731757013613"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731757013613"}]},"ts":"1731757013613"} 2024-11-16T11:36:53,616 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
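The create request recorded above (create 'TestLogRolling-testCompactionRecordDoesntBlockRolling' with a single 'info' family, VERSIONS => '1', plus the MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192 "too small" warnings) corresponds to an Admin.createTable call on the client side. A minimal sketch of such a call, assuming only the stock HBase client API; the class name and connection boilerplate are illustrative, while the table name, family, and the two deliberately tiny sizes are copied from the log lines above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {               // hypothetical name, not the test's actual source
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(1)               // VERSIONS => '1' in the logged descriptor
                    .build())
                .setMaxFileSize(786432L)             // triggers the MAX_FILESIZE "too small" warning above
                .setMemStoreFlushSize(8192L);        // triggers the MEMSTORE_FLUSHSIZE "too small" warning above
            admin.createTable(td.build());           // master then runs CreateTableProcedure (pid=4 in the log)
        }
    }
}
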
2024-11-16T11:36:53,617 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T11:36:53,617 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731757013617"}]},"ts":"1731757013617"} 2024-11-16T11:36:53,619 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-16T11:36:53,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=dd0873104aa8588885420bd54efd8f72, ASSIGN}] 2024-11-16T11:36:53,621 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=dd0873104aa8588885420bd54efd8f72, ASSIGN 2024-11-16T11:36:53,622 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=dd0873104aa8588885420bd54efd8f72, ASSIGN; state=OFFLINE, location=a7948fca2832,33179,1731757012444; forceNewPlan=false, retain=false 2024-11-16T11:36:53,773 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=dd0873104aa8588885420bd54efd8f72, regionState=OPENING, regionLocation=a7948fca2832,33179,1731757012444 2024-11-16T11:36:53,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=dd0873104aa8588885420bd54efd8f72, ASSIGN because future has completed 2024-11-16T11:36:53,776 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure dd0873104aa8588885420bd54efd8f72, server=a7948fca2832,33179,1731757012444}] 2024-11-16T11:36:53,932 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. 
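The WARN/stack-trace pairs that repeat roughly once per second above and below ("Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed") show RecoverLeaseFSUtils probing isFileClosed through reflection against WAL files whose underlying DFSClient has already been shut down, so every probe fails and the Close-WAL-Writer worker retries. A minimal sketch of that reflective probe, assuming only the public FileSystem/Path API; this is an illustration of the pattern visible in the traces, not HBase's actual RecoverLeaseFSUtils, which wraps the probe in its own retry and timeout handling:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {               // hypothetical helper, for illustration only
    static boolean isFileClosed(FileSystem fs, Path wal) {
        try {
            // DistributedFileSystem exposes isFileClosed(Path); it is looked up reflectively
            // so the calling code also works against filesystems that lack the method.
            Method m = fs.getClass().getMethod("isFileClosed", Path.class);
            return (Boolean) m.invoke(fs, wal);
        } catch (InvocationTargetException e) {
            // A closed DFSClient surfaces here, wrapping IOException("Filesystem closed"),
            // which is exactly what the repeated WARN lines record; the caller retries.
            return false;
        } catch (ReflectiveOperationException e) {
            return false;                            // method absent or inaccessible
        }
    }
}
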
2024-11-16T11:36:53,932 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => dd0873104aa8588885420bd54efd8f72, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:36:53,933 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling dd0873104aa8588885420bd54efd8f72 2024-11-16T11:36:53,933 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:36:53,933 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for dd0873104aa8588885420bd54efd8f72 2024-11-16T11:36:53,933 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for dd0873104aa8588885420bd54efd8f72 2024-11-16T11:36:53,934 INFO [StoreOpener-dd0873104aa8588885420bd54efd8f72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region dd0873104aa8588885420bd54efd8f72 2024-11-16T11:36:53,936 INFO [StoreOpener-dd0873104aa8588885420bd54efd8f72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dd0873104aa8588885420bd54efd8f72 columnFamilyName info 2024-11-16T11:36:53,936 DEBUG [StoreOpener-dd0873104aa8588885420bd54efd8f72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:36:53,936 INFO [StoreOpener-dd0873104aa8588885420bd54efd8f72-1 {}] regionserver.HStore(327): Store=dd0873104aa8588885420bd54efd8f72/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:36:53,936 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for dd0873104aa8588885420bd54efd8f72 2024-11-16T11:36:53,937 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72 2024-11-16T11:36:53,938 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72 2024-11-16T11:36:53,938 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for dd0873104aa8588885420bd54efd8f72 2024-11-16T11:36:53,938 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for dd0873104aa8588885420bd54efd8f72 2024-11-16T11:36:53,940 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for dd0873104aa8588885420bd54efd8f72 2024-11-16T11:36:53,942 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:36:53,942 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened dd0873104aa8588885420bd54efd8f72; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734324, jitterRate=-0.06625977158546448}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T11:36:53,943 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for dd0873104aa8588885420bd54efd8f72 2024-11-16T11:36:53,943 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for dd0873104aa8588885420bd54efd8f72: Running coprocessor pre-open hook at 1731757013933Writing region info on filesystem at 1731757013933Initializing all the Stores at 1731757013934 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757013934Cleaning up temporary data from old regions at 1731757013938 (+4 ms)Running coprocessor post-open hooks at 1731757013943 (+5 ms)Region opened successfully at 1731757013943 2024-11-16T11:36:53,945 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72., pid=6, masterSystemTime=1731757013928 2024-11-16T11:36:53,947 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. 2024-11-16T11:36:53,947 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. 2024-11-16T11:36:53,949 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=dd0873104aa8588885420bd54efd8f72, regionState=OPEN, openSeqNum=2, regionLocation=a7948fca2832,33179,1731757012444 2024-11-16T11:36:53,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure dd0873104aa8588885420bd54efd8f72, server=a7948fca2832,33179,1731757012444 because future has completed 2024-11-16T11:36:53,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T11:36:53,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure dd0873104aa8588885420bd54efd8f72, server=a7948fca2832,33179,1731757012444 in 177 msec 2024-11-16T11:36:53,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T11:36:53,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=dd0873104aa8588885420bd54efd8f72, ASSIGN in 336 msec 2024-11-16T11:36:53,960 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T11:36:53,960 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731757013960"}]},"ts":"1731757013960"} 2024-11-16T11:36:53,963 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-16T11:36:53,964 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T11:36:53,966 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 376 msec 2024-11-16T11:36:54,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:54,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:55,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:55,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:56,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:56,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:56,640 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T11:36:56,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,643 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,643 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,643 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,675 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,676 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,676 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,684 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,684 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:56,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:36:57,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:57,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:58,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:58,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:58,920 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T11:36:58,921 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-16T11:36:59,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:36:59,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:00,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:00,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:01,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T11:37:01,137 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T11:37:01,138 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T11:37:01,138 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T11:37:01,138 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T11:37:01,138 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-16T11:37:01,138 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T11:37:01,138 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T11:37:01,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:01,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:02,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:02,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:03,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:03,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-16T11:37:03,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-16T11:37:03,627 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-16T11:37:03,627 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-16T11:37:03,631 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T11:37:03,631 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.
2024-11-16T11:37:03,635 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72., hostname=a7948fca2832,33179,1731757012444, seqNum=2]
2024-11-16T11:37:03,642 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T11:37:03,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T11:37:03,648 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-16T11:37:03,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-16T11:37:03,649 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-16T11:37:03,651 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-16T11:37:03,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33179 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-16T11:37:03,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.
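The entries above show the flush being requested from the client side: the master receives "Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling", stores a FlushTableProcedure (pid=7), and dispatches a FlushRegionProcedure (pid=8) to the region server, which then begins the region operation. The snippet below is a minimal sketch, using the public HBase client Admin API, of how a client could trigger such a flush; the table name is taken from this log, the class name and everything else is illustrative and is not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            // Picks up hbase-site.xml from the classpath (cluster address, ZooKeeper quorum, etc.).
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Asks the master to flush every region of the table; in this log that request
                // surfaces as a FlushTableProcedure (pid=7) with a FlushRegionProcedure child (pid=8).
                admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
            }
        }
    }
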
2024-11-16T11:37:03,816 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing dd0873104aa8588885420bd54efd8f72 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T11:37:03,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/77f9969b6d01492994bd1cdbc978ae5c is 1080, key is row0001/info:/1731757023636/Put/seqid=0
2024-11-16T11:37:03,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741837_1013 (size=6033)
2024-11-16T11:37:03,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741837_1013 (size=6033)
2024-11-16T11:37:03,841 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/77f9969b6d01492994bd1cdbc978ae5c
2024-11-16T11:37:03,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/77f9969b6d01492994bd1cdbc978ae5c as hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/77f9969b6d01492994bd1cdbc978ae5c
2024-11-16T11:37:03,854 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/77f9969b6d01492994bd1cdbc978ae5c, entries=1, sequenceid=5, filesize=5.9 K
2024-11-16T11:37:03,855 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for dd0873104aa8588885420bd54efd8f72 in 39ms, sequenceid=5, compaction requested=false
2024-11-16T11:37:03,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for dd0873104aa8588885420bd54efd8f72:
2024-11-16T11:37:03,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.
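The recurring "Failed invocation for hdfs://..." warnings before and after this point come from RecoverLeaseFSUtils on the Close-WAL-Writer-0 thread: roughly once a second it invokes DistributedFileSystem.recoverLease and isFileClosed via reflection (hence the InvocationTargetException wrapper) against a DFSClient that has already been shut down, which is why every attempt fails with "java.io.IOException: Filesystem closed". The snippet below is a minimal sketch of that lease-recovery polling pattern using the public HDFS client API directly; the namenode address is taken from this log, the WAL path and timeout are illustrative, and this is not HBase's RecoverLeaseFSUtils implementation.

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Namenode URI as seen in the WAL paths in this log; the file path below is a placeholder.
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39669"), conf);
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            Path wal = new Path("/user/jenkins/wals/example.wal");

            long deadline = System.currentTimeMillis() + 60_000L; // give up after one minute
            boolean closed = dfs.recoverLease(wal);               // true if the file is already closed
            while (!closed && System.currentTimeMillis() < deadline) {
                Thread.sleep(1_000L);                             // the log shows roughly 1s between attempts
                closed = dfs.isFileClosed(wal);                   // poll until the NameNode reports the file closed
            }
            if (!closed) {
                throw new IOException("Lease not recovered for " + wal);
            }
        }
    }
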
2024-11-16T11:37:03,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-16T11:37:03,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-16T11:37:03,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-16T11:37:03,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 209 msec
2024-11-16T11:37:03,866 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 220 msec
2024-11-16T11:37:04,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-16T11:37:04,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:05,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:05,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:06,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:06,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:07,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:07,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:08,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:08,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:09,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:09,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:10,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:10,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:11,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:11,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:12,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:12,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 after 68074ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:37:12,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:12,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta after 68046ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T11:37:13,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:13,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:13,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-16T11:37:13,716 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T11:37:13,719 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T11:37:13,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T11:37:13,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-16T11:37:13,722 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-16T11:37:13,724 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-16T11:37:13,724 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-16T11:37:13,877 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33179 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-16T11:37:13,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. 
2024-11-16T11:37:13,878 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing dd0873104aa8588885420bd54efd8f72 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T11:37:13,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/9a017ec2e7f04cc293f2b94639fecd15 is 1080, key is row0002/info:/1731757033718/Put/seqid=0 2024-11-16T11:37:13,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741838_1014 (size=6033) 2024-11-16T11:37:13,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741838_1014 (size=6033) 2024-11-16T11:37:14,293 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/9a017ec2e7f04cc293f2b94639fecd15 2024-11-16T11:37:14,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/9a017ec2e7f04cc293f2b94639fecd15 as hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/9a017ec2e7f04cc293f2b94639fecd15 2024-11-16T11:37:14,308 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/9a017ec2e7f04cc293f2b94639fecd15, entries=1, sequenceid=9, filesize=5.9 K 2024-11-16T11:37:14,309 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for dd0873104aa8588885420bd54efd8f72 in 431ms, sequenceid=9, compaction requested=false 2024-11-16T11:37:14,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for dd0873104aa8588885420bd54efd8f72: 2024-11-16T11:37:14,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. 
2024-11-16T11:37:14,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-16T11:37:14,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-16T11:37:14,314 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-16T11:37:14,314 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 587 msec 2024-11-16T11:37:14,316 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 595 msec 2024-11-16T11:37:14,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:14,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:15,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:15,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:16,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:16,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:17,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:17,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:18,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:18,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:19,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:19,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:20,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:20,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:21,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:21,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:22,259 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T11:37:22,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:22,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:23,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:23,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-16T11:37:23,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-16T11:37:23,786 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-16T11:37:23,790 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C33179%2C1731757012444.1731757043790
2024-11-16T11:37:23,797 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:37:23,798 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:37:23,798 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:37:23,798 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:37:23,798 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:37:23,798 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444/a7948fca2832%2C33179%2C1731757012444.1731757013061 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444/a7948fca2832%2C33179%2C1731757012444.1731757043790
2024-11-16T11:37:23,800 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38203:38203),(127.0.0.1/127.0.0.1:39223:39223)]
2024-11-16T11:37:23,800 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444/a7948fca2832%2C33179%2C1731757012444.1731757013061 is not closed yet, will try archiving it next time
2024-11-16T11:37:23,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741833_1009 (size=5546)
2024-11-16T11:37:23,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741833_1009 (size=5546)
2024-11-16T11:37:23,804 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T11:37:23,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T11:37:23,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-16T11:37:23,808 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-16T11:37:23,809 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
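The flush request and WAL roll recorded just above are driven from the test client. As a rough, hedged illustration only (this is not the test's actual code), the equivalent client-side calls might look like the Java sketch below; the configuration, connection handling, and the ServerName string are assumptions for illustration, and the log does not show whether the test rolls the WAL through Admin or through the WAL object directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch: flush the test table and roll a region server's WAL,
// mirroring the "flush TestLogRolling-..." and "Rolled WAL ..." records above.
// The connection settings and the ServerName value are illustrative assumptions.
public final class FlushAndRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table =
          TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      // Triggers a FlushTableProcedure on the master (pid=11 above) and one
      // FlushRegionProcedure per region (pid=12 below).
      admin.flush(table);
      // Asks one region server to roll its WAL; the log then reports the old
      // WAL's entry count and size and the name of the new WAL file.
      admin.rollWALWriter(ServerName.valueOf("a7948fca2832,33179,1731757012444"));
    }
  }
}

In this particular run the roll appears to be performed by the test thread itself (the "Time-limited test" records), so the Admin-based roll above is just one plausible way to exercise the same code path.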
2024-11-16T11:37:23,809 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-16T11:37:23,962 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33179 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-16T11:37:23,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.
2024-11-16T11:37:23,963 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing dd0873104aa8588885420bd54efd8f72 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T11:37:23,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/45b347a38a424e81b07c26e0956be896 is 1080, key is row0003/info:/1731757043788/Put/seqid=0
2024-11-16T11:37:23,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741840_1016 (size=6033)
2024-11-16T11:37:23,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741840_1016 (size=6033)
2024-11-16T11:37:23,977 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/45b347a38a424e81b07c26e0956be896
2024-11-16T11:37:23,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/45b347a38a424e81b07c26e0956be896 as hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/45b347a38a424e81b07c26e0956be896
2024-11-16T11:37:24,041 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/45b347a38a424e81b07c26e0956be896, entries=1, sequenceid=13, filesize=5.9 K
2024-11-16T11:37:24,043 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for dd0873104aa8588885420bd54efd8f72 in 80ms, sequenceid=13, compaction requested=true
2024-11-16T11:37:24,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for dd0873104aa8588885420bd54efd8f72:
2024-11-16T11:37:24,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.
2024-11-16T11:37:24,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-16T11:37:24,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-16T11:37:24,052 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-16T11:37:24,052 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 238 msec
2024-11-16T11:37:24,055 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 249 msec
2024-11-16T11:37:24,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:24,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:25,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:25,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:26,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:26,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:27,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:27,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:28,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:28,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:29,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:29,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:30,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:30,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:31,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:31,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:32,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:32,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:33,576 INFO [master/a7948fca2832:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T11:37:33,576 INFO [master/a7948fca2832:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T11:37:33,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:33,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-16T11:37:33,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-16T11:37:33,886 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-16T11:37:33,886 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-16T11:37:33,888 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-16T11:37:33,888 DEBUG [Time-limited test {}] regionserver.HStore(1541): dd0873104aa8588885420bd54efd8f72/info is initiating minor compaction (all files)
2024-11-16T11:37:33,888 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-16T11:37:33,888 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-16T11:37:33,888 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of dd0873104aa8588885420bd54efd8f72/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.
2024-11-16T11:37:33,888 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/77f9969b6d01492994bd1cdbc978ae5c, hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/9a017ec2e7f04cc293f2b94639fecd15, hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/45b347a38a424e81b07c26e0956be896] into tmpdir=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp, totalSize=17.7 K
2024-11-16T11:37:33,889 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 77f9969b6d01492994bd1cdbc978ae5c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731757023636
2024-11-16T11:37:33,889 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 9a017ec2e7f04cc293f2b94639fecd15, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731757033718
2024-11-16T11:37:33,890 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 45b347a38a424e81b07c26e0956be896, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731757043788
2024-11-16T11:37:33,902 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): dd0873104aa8588885420bd54efd8f72#info#compaction#47 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-16T11:37:33,903 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/eae79d21077749adb2d7b8df398bd7eb is 1080, key is row0001/info:/1731757023636/Put/seqid=0
2024-11-16T11:37:33,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741841_1017 (size=8296)
2024-11-16T11:37:33,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741841_1017 (size=8296)
2024-11-16T11:37:33,915 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/eae79d21077749adb2d7b8df398bd7eb as hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/eae79d21077749adb2d7b8df398bd7eb
2024-11-16T11:37:33,922 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dd0873104aa8588885420bd54efd8f72/info of dd0873104aa8588885420bd54efd8f72 into eae79d21077749adb2d7b8df398bd7eb(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-16T11:37:33,922 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for dd0873104aa8588885420bd54efd8f72:
2024-11-16T11:37:33,925 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C33179%2C1731757012444.1731757053924
2024-11-16T11:37:33,930 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:37:33,930 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:37:33,930 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:37:33,931 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:37:33,931 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:37:33,931 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444/a7948fca2832%2C33179%2C1731757012444.1731757043790 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444/a7948fca2832%2C33179%2C1731757012444.1731757053924
2024-11-16T11:37:33,932 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38203:38203),(127.0.0.1/127.0.0.1:39223:39223)]
2024-11-16T11:37:33,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741839_1015 (size=2520)
2024-11-16T11:37:33,932 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444/a7948fca2832%2C33179%2C1731757012444.1731757043790 is not closed yet, will try archiving it next time
2024-11-16T11:37:33,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741839_1015 (size=2520)
2024-11-16T11:37:33,933 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444/a7948fca2832%2C33179%2C1731757012444.1731757013061 to hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/oldWALs/a7948fca2832%2C33179%2C1731757012444.1731757013061
2024-11-16T11:37:33,934 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T11:37:33,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T11:37:33,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-16T11:37:33,936 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-16T11:37:33,937 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-16T11:37:33,937 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-16T11:37:34,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33179 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-16T11:37:34,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.
2024-11-16T11:37:34,090 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing dd0873104aa8588885420bd54efd8f72 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T11:37:34,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/cee72b282e374a33b94e02f3e5240eb9 is 1080, key is row0000/info:/1731757053923/Put/seqid=0
2024-11-16T11:37:34,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741843_1019 (size=6033)
2024-11-16T11:37:34,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741843_1019 (size=6033)
2024-11-16T11:37:34,152 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/cee72b282e374a33b94e02f3e5240eb9
2024-11-16T11:37:34,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/cee72b282e374a33b94e02f3e5240eb9 as hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/cee72b282e374a33b94e02f3e5240eb9
2024-11-16T11:37:34,167 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/cee72b282e374a33b94e02f3e5240eb9, entries=1, sequenceid=18, filesize=5.9 K
2024-11-16T11:37:34,168 INFO [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for dd0873104aa8588885420bd54efd8f72 in 78ms, sequenceid=18, compaction requested=false
2024-11-16T11:37:34,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for dd0873104aa8588885420bd54efd8f72:
2024-11-16T11:37:34,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.
2024-11-16T11:37:34,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-16T11:37:34,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-16T11:37:34,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-16T11:37:34,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 233 msec 2024-11-16T11:37:34,175 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 240 msec 2024-11-16T11:37:34,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:34,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:35,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:35,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:36,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:36,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:37,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:37,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:38,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:38,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:38,933 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region dd0873104aa8588885420bd54efd8f72, had cached 0 bytes from a total of 14329 2024-11-16T11:37:39,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:39,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:40,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:40,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:41,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:41,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:42,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:42,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:43,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:43,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:43,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43583 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-16T11:37:43,996 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T11:37:43,999 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C33179%2C1731757012444.1731757063999 2024-11-16T11:37:44,007 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,007 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,007 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,007 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,007 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,008 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444/a7948fca2832%2C33179%2C1731757012444.1731757053924 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444/a7948fca2832%2C33179%2C1731757012444.1731757063999 2024-11-16T11:37:44,008 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39223:39223),(127.0.0.1/127.0.0.1:38203:38203)] 2024-11-16T11:37:44,008 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444/a7948fca2832%2C33179%2C1731757012444.1731757053924 is not closed yet, will try archiving it next time 2024-11-16T11:37:44,008 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/WALs/a7948fca2832,33179,1731757012444/a7948fca2832%2C33179%2C1731757012444.1731757043790 to hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/oldWALs/a7948fca2832%2C33179%2C1731757012444.1731757043790 2024-11-16T11:37:44,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T11:37:44,009 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
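Note on the repeated RecoverLeaseFSUtils warnings above: the stack traces show HBase probing DFS lease recovery by invoking isFileClosed through reflection and retrying roughly once a second when the call fails, here because the underlying DFSClient was already closed ("java.io.IOException: Filesystem closed" as the InvocationTargetException cause). The following is a minimal illustrative sketch of that reflective probe only; the class and method names are hypothetical and only the java.lang.reflect and Hadoop FileSystem/Path APIs are assumed, this is not the actual RecoverLeaseFSUtils code.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  /**
   * Returns true once the filesystem reports the file closed; returns false (so the
   * caller keeps polling) when the method is absent or the invocation fails, which is
   * the WARN-and-retry pattern visible in the log above.
   */
  static boolean probe(FileSystem fs, Path path) {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // this filesystem does not expose isFileClosed at all
    } catch (IllegalAccessException | InvocationTargetException e) {
      // e.getCause() carries the real failure, e.g. "java.io.IOException: Filesystem closed"
      return false;
    }
  }
}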
2024-11-16T11:37:44,009 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:37:44,009 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:37:44,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741842_1018 (size=2026) 2024-11-16T11:37:44,009 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:37:44,009 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T11:37:44,009 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T11:37:44,009 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1262485124, stopped=false 2024-11-16T11:37:44,009 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a7948fca2832,43583,1731757012277 2024-11-16T11:37:44,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741842_1018 (size=2026) 2024-11-16T11:37:44,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T11:37:44,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T11:37:44,071 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T11:37:44,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:44,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:44,071 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T11:37:44,071 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:37:44,071 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:37:44,072 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:37:44,072 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a7948fca2832,33179,1731757012444' ***** 2024-11-16T11:37:44,072 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T11:37:44,072 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:37:44,072 INFO [RS:0;a7948fca2832:33179 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T11:37:44,072 INFO [RS:0;a7948fca2832:33179 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T11:37:44,072 INFO [RS:0;a7948fca2832:33179 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T11:37:44,073 INFO [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(3091): Received CLOSE for dd0873104aa8588885420bd54efd8f72 2024-11-16T11:37:44,073 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T11:37:44,073 INFO [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(959): stopping server a7948fca2832,33179,1731757012444 2024-11-16T11:37:44,073 INFO [RS:0;a7948fca2832:33179 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T11:37:44,073 INFO [RS:0;a7948fca2832:33179 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a7948fca2832:33179. 2024-11-16T11:37:44,073 DEBUG [RS:0;a7948fca2832:33179 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:37:44,073 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing dd0873104aa8588885420bd54efd8f72, disabling compactions & flushes 2024-11-16T11:37:44,073 DEBUG [RS:0;a7948fca2832:33179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:37:44,074 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. 2024-11-16T11:37:44,074 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. 2024-11-16T11:37:44,074 INFO [RS:0;a7948fca2832:33179 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T11:37:44,074 INFO [RS:0;a7948fca2832:33179 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T11:37:44,074 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. after waiting 0 ms 2024-11-16T11:37:44,074 INFO [RS:0;a7948fca2832:33179 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T11:37:44,074 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. 2024-11-16T11:37:44,074 INFO [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T11:37:44,074 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing dd0873104aa8588885420bd54efd8f72 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T11:37:44,074 INFO [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T11:37:44,074 DEBUG [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, dd0873104aa8588885420bd54efd8f72=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.} 2024-11-16T11:37:44,074 DEBUG [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, dd0873104aa8588885420bd54efd8f72 2024-11-16T11:37:44,074 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T11:37:44,074 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T11:37:44,075 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T11:37:44,075 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T11:37:44,075 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T11:37:44,075 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-16T11:37:44,080 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 
{event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/8e9717985c1e4ff8bf0cdecccf0d0cf9 is 1080, key is row0001/info:/1731757063997/Put/seqid=0 2024-11-16T11:37:44,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741845_1021 (size=6033) 2024-11-16T11:37:44,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741845_1021 (size=6033) 2024-11-16T11:37:44,090 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/8e9717985c1e4ff8bf0cdecccf0d0cf9 2024-11-16T11:37:44,094 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/.tmp/info/e640ba468b0d42b3ba745690d68c01b2 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72./info:regioninfo/1731757013948/Put/seqid=0 2024-11-16T11:37:44,097 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/.tmp/info/8e9717985c1e4ff8bf0cdecccf0d0cf9 as hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/8e9717985c1e4ff8bf0cdecccf0d0cf9 2024-11-16T11:37:44,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741846_1022 (size=7308) 2024-11-16T11:37:44,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741846_1022 (size=7308) 2024-11-16T11:37:44,102 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/.tmp/info/e640ba468b0d42b3ba745690d68c01b2 2024-11-16T11:37:44,103 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/8e9717985c1e4ff8bf0cdecccf0d0cf9, entries=1, sequenceid=22, filesize=5.9 K 2024-11-16T11:37:44,105 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize 
~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for dd0873104aa8588885420bd54efd8f72 in 30ms, sequenceid=22, compaction requested=true 2024-11-16T11:37:44,105 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/77f9969b6d01492994bd1cdbc978ae5c, hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/9a017ec2e7f04cc293f2b94639fecd15, hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/45b347a38a424e81b07c26e0956be896] to archive 2024-11-16T11:37:44,109 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T11:37:44,110 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/77f9969b6d01492994bd1cdbc978ae5c to hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/77f9969b6d01492994bd1cdbc978ae5c 2024-11-16T11:37:44,112 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/9a017ec2e7f04cc293f2b94639fecd15 to hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/9a017ec2e7f04cc293f2b94639fecd15 2024-11-16T11:37:44,113 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/45b347a38a424e81b07c26e0956be896 to hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/info/45b347a38a424e81b07c26e0956be896 2024-11-16T11:37:44,114 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=a7948fca2832:43583 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-16T11:37:44,114 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [77f9969b6d01492994bd1cdbc978ae5c=6033, 9a017ec2e7f04cc293f2b94639fecd15=6033, 45b347a38a424e81b07c26e0956be896=6033] 2024-11-16T11:37:44,121 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/dd0873104aa8588885420bd54efd8f72/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-16T11:37:44,122 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. 2024-11-16T11:37:44,122 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for dd0873104aa8588885420bd54efd8f72: Waiting for close lock at 1731757064073Running coprocessor pre-close hooks at 1731757064073Disabling compacts and flushes for region at 1731757064073Disabling writes for close at 1731757064074 (+1 ms)Obtaining lock to block concurrent updates at 1731757064074Preparing flush snapshotting stores in dd0873104aa8588885420bd54efd8f72 at 1731757064074Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731757064075 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. at 1731757064075Flushing dd0873104aa8588885420bd54efd8f72/info: creating writer at 1731757064076 (+1 ms)Flushing dd0873104aa8588885420bd54efd8f72/info: appending metadata at 1731757064079 (+3 ms)Flushing dd0873104aa8588885420bd54efd8f72/info: closing flushed file at 1731757064079Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11f99e0a: reopening flushed file at 1731757064096 (+17 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for dd0873104aa8588885420bd54efd8f72 in 30ms, sequenceid=22, compaction requested=true at 1731757064105 (+9 ms)Writing region close event to WAL at 1731757064118 (+13 ms)Running coprocessor post-close hooks at 1731757064122 (+4 ms)Closed at 1731757064122 2024-11-16T11:37:44,122 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731757013588.dd0873104aa8588885420bd54efd8f72. 
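The flush sequence logged above (DefaultStoreFlusher writing the new HFile under .tmp/info, then HRegionFileSystem "Committing ... as ..." into the store directory) is a write-to-temp-then-rename publish step. Below is a minimal sketch of that pattern against the generic Hadoop FileSystem API only; the paths and class name are illustrative and are not the test's actual paths or HBase's HRegionFileSystem implementation.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameCommit {
  // Publish a flushed file by renaming it from the region's .tmp area into the store
  // directory; the rename is the "Committing ... as ..." step in the DEBUG lines above.
  static void commit(FileSystem fs, Path tmpFile, Path storeFile) throws IOException {
    if (!fs.rename(tmpFile, storeFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
    }
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());           // e.g. hdfs:// from core-site.xml
    Path tmp = new Path("/data/default/T/r1/.tmp/info/flushfile"); // illustrative, not from the log
    Path dst = new Path("/data/default/T/r1/info/flushfile");      // illustrative, not from the log
    commit(fs, tmp, dst);
  }
}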
2024-11-16T11:37:44,130 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/.tmp/ns/8c8890f2a177478bbad15c41560d42ee is 43, key is default/ns:d/1731757013497/Put/seqid=0 2024-11-16T11:37:44,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741847_1023 (size=5153) 2024-11-16T11:37:44,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741847_1023 (size=5153) 2024-11-16T11:37:44,135 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/.tmp/ns/8c8890f2a177478bbad15c41560d42ee 2024-11-16T11:37:44,157 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/.tmp/table/25c162dee59748faad15580d9dffe27e is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731757013960/Put/seqid=0 2024-11-16T11:37:44,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741848_1024 (size=5508) 2024-11-16T11:37:44,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741848_1024 (size=5508) 2024-11-16T11:37:44,162 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/.tmp/table/25c162dee59748faad15580d9dffe27e 2024-11-16T11:37:44,168 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/.tmp/info/e640ba468b0d42b3ba745690d68c01b2 as hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/info/e640ba468b0d42b3ba745690d68c01b2 2024-11-16T11:37:44,174 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/info/e640ba468b0d42b3ba745690d68c01b2, entries=10, sequenceid=11, filesize=7.1 K 2024-11-16T11:37:44,175 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/.tmp/ns/8c8890f2a177478bbad15c41560d42ee as hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/ns/8c8890f2a177478bbad15c41560d42ee 2024-11-16T11:37:44,180 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/ns/8c8890f2a177478bbad15c41560d42ee, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T11:37:44,181 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/.tmp/table/25c162dee59748faad15580d9dffe27e as hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/table/25c162dee59748faad15580d9dffe27e 2024-11-16T11:37:44,187 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/table/25c162dee59748faad15580d9dffe27e, entries=2, sequenceid=11, filesize=5.4 K 2024-11-16T11:37:44,188 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 113ms, sequenceid=11, compaction requested=false 2024-11-16T11:37:44,200 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T11:37:44,200 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T11:37:44,200 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T11:37:44,200 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731757064074Running coprocessor pre-close hooks at 1731757064074Disabling compacts and flushes for region at 1731757064074Disabling writes for close at 1731757064075 (+1 ms)Obtaining lock to block concurrent updates at 1731757064075Preparing flush snapshotting stores in 1588230740 at 1731757064075Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731757064075Flushing stores of hbase:meta,,1.1588230740 at 1731757064076 (+1 ms)Flushing 1588230740/info: creating writer at 1731757064076Flushing 1588230740/info: appending metadata at 1731757064093 (+17 ms)Flushing 1588230740/info: closing flushed file at 1731757064094 (+1 ms)Flushing 1588230740/ns: creating writer at 1731757064114 (+20 ms)Flushing 1588230740/ns: appending metadata at 1731757064130 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731757064130Flushing 1588230740/table: creating writer at 1731757064140 (+10 ms)Flushing 1588230740/table: appending metadata at 1731757064156 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731757064156Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e3f6e79: reopening flushed file at 1731757064167 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46fb0cc7: reopening flushed file at 1731757064174 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f311aaa: reopening flushed file at 1731757064180 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 113ms, sequenceid=11, compaction requested=false at 1731757064188 (+8 ms)Writing region close event to WAL at 1731757064196 (+8 ms)Running coprocessor post-close hooks at 1731757064200 (+4 ms)Closed at 1731757064200 2024-11-16T11:37:44,201 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T11:37:44,274 INFO [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(976): stopping server a7948fca2832,33179,1731757012444; all regions closed. 2024-11-16T11:37:44,275 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,275 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,276 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,276 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,276 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741834_1010 (size=3306) 2024-11-16T11:37:44,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741834_1010 (size=3306) 2024-11-16T11:37:44,282 DEBUG [RS:0;a7948fca2832:33179 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/oldWALs 2024-11-16T11:37:44,282 INFO [RS:0;a7948fca2832:33179 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C33179%2C1731757012444.meta:.meta(num 1731757013419) 2024-11-16T11:37:44,282 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,282 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,283 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,283 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,283 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741844_1020 (size=1252) 2024-11-16T11:37:44,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741844_1020 (size=1252) 2024-11-16T11:37:44,288 DEBUG [RS:0;a7948fca2832:33179 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/oldWALs 2024-11-16T11:37:44,288 INFO [RS:0;a7948fca2832:33179 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C33179%2C1731757012444:(num 1731757063999) 2024-11-16T11:37:44,288 DEBUG [RS:0;a7948fca2832:33179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:37:44,288 INFO [RS:0;a7948fca2832:33179 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T11:37:44,288 INFO [RS:0;a7948fca2832:33179 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T11:37:44,289 INFO [RS:0;a7948fca2832:33179 {}] hbase.ChoreService(370): Chore service for: regionserver/a7948fca2832:0 had 
[ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T11:37:44,289 INFO [RS:0;a7948fca2832:33179 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T11:37:44,289 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T11:37:44,289 INFO [RS:0;a7948fca2832:33179 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33179 2024-11-16T11:37:44,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7948fca2832,33179,1731757012444 2024-11-16T11:37:44,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:37:44,309 INFO [RS:0;a7948fca2832:33179 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T11:37:44,309 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7948fca2832,33179,1731757012444] 2024-11-16T11:37:44,329 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7948fca2832,33179,1731757012444 already deleted, retry=false 2024-11-16T11:37:44,329 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7948fca2832,33179,1731757012444 expired; onlineServers=0 2024-11-16T11:37:44,329 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a7948fca2832,43583,1731757012277' ***** 2024-11-16T11:37:44,329 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T11:37:44,329 INFO [M:0;a7948fca2832:43583 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T11:37:44,330 INFO [M:0;a7948fca2832:43583 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T11:37:44,330 DEBUG [M:0;a7948fca2832:43583 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T11:37:44,330 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
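The chore lists logged at shutdown above describe each chore by name, period, and unit. As a rough analogy only, and not the HBase ChoreService API, the same schedule-and-cancel lifecycle can be sketched with a plain ScheduledExecutorService; the task body here is a placeholder.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreLifecycleSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
    // Mirrors "ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS"
    chores.scheduleAtFixedRate(
        () -> System.out.println("tuning compaction throughput"),
        0, 60_000, TimeUnit.MILLISECONDS);
    Thread.sleep(100);
    chores.shutdownNow(); // the "Shutdown chores and chore service" step during server stop
  }
}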
2024-11-16T11:37:44,330 DEBUG [M:0;a7948fca2832:43583 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T11:37:44,330 DEBUG [master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731757012787 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731757012787,5,FailOnTimeoutGroup] 2024-11-16T11:37:44,330 DEBUG [master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731757012787 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731757012787,5,FailOnTimeoutGroup] 2024-11-16T11:37:44,330 INFO [M:0;a7948fca2832:43583 {}] hbase.ChoreService(370): Chore service for: master/a7948fca2832:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T11:37:44,330 INFO [M:0;a7948fca2832:43583 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T11:37:44,330 DEBUG [M:0;a7948fca2832:43583 {}] master.HMaster(1795): Stopping service threads 2024-11-16T11:37:44,330 INFO [M:0;a7948fca2832:43583 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T11:37:44,330 INFO [M:0;a7948fca2832:43583 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T11:37:44,330 INFO [M:0;a7948fca2832:43583 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T11:37:44,330 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T11:37:44,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T11:37:44,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:44,340 DEBUG [M:0;a7948fca2832:43583 {}] zookeeper.ZKUtil(347): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T11:37:44,340 WARN [M:0;a7948fca2832:43583 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T11:37:44,341 INFO [M:0;a7948fca2832:43583 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/.lastflushedseqids 2024-11-16T11:37:44,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741849_1025 (size=130) 2024-11-16T11:37:44,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741849_1025 (size=130) 2024-11-16T11:37:44,347 INFO [M:0;a7948fca2832:43583 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T11:37:44,347 INFO [M:0;a7948fca2832:43583 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T11:37:44,347 DEBUG [M:0;a7948fca2832:43583 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T11:37:44,347 INFO [M:0;a7948fca2832:43583 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:37:44,347 DEBUG [M:0;a7948fca2832:43583 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:37:44,347 DEBUG [M:0;a7948fca2832:43583 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T11:37:44,347 DEBUG [M:0;a7948fca2832:43583 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:37:44,348 INFO [M:0;a7948fca2832:43583 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-11-16T11:37:44,369 DEBUG [M:0;a7948fca2832:43583 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f8b13a2ed0e347d2a14d1095324973cc is 82, key is hbase:meta,,1/info:regioninfo/1731757013445/Put/seqid=0 2024-11-16T11:37:44,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741850_1026 (size=5672) 2024-11-16T11:37:44,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741850_1026 (size=5672) 2024-11-16T11:37:44,377 INFO [M:0;a7948fca2832:43583 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f8b13a2ed0e347d2a14d1095324973cc 2024-11-16T11:37:44,399 DEBUG [M:0;a7948fca2832:43583 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/794e313cda5146a9b007acda47bba9f1 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731757013965/Put/seqid=0 2024-11-16T11:37:44,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741851_1027 (size=7823) 2024-11-16T11:37:44,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741851_1027 (size=7823) 2024-11-16T11:37:44,404 INFO [M:0;a7948fca2832:43583 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/794e313cda5146a9b007acda47bba9f1 2024-11-16T11:37:44,409 INFO [M:0;a7948fca2832:43583 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 794e313cda5146a9b007acda47bba9f1 2024-11-16T11:37:44,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:37:44,419 INFO 
[RS:0;a7948fca2832:33179 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T11:37:44,419 INFO [RS:0;a7948fca2832:33179 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7948fca2832,33179,1731757012444; zookeeper connection closed. 2024-11-16T11:37:44,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33179-0x101436ef0d00001, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:37:44,419 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4f8c6c4a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4f8c6c4a 2024-11-16T11:37:44,420 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T11:37:44,429 DEBUG [M:0;a7948fca2832:43583 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf071edb8b7f4f05ba3511298c7d620c is 69, key is a7948fca2832,33179,1731757012444/rs:state/1731757012907/Put/seqid=0 2024-11-16T11:37:44,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741852_1028 (size=5156) 2024-11-16T11:37:44,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741852_1028 (size=5156) 2024-11-16T11:37:44,434 INFO [M:0;a7948fca2832:43583 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf071edb8b7f4f05ba3511298c7d620c 2024-11-16T11:37:44,454 DEBUG [M:0;a7948fca2832:43583 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c4d733e200a54143a2e3726805cad733 is 52, key is load_balancer_on/state:d/1731757013583/Put/seqid=0 2024-11-16T11:37:44,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741853_1029 (size=5056) 2024-11-16T11:37:44,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741853_1029 (size=5056) 2024-11-16T11:37:44,459 INFO [M:0;a7948fca2832:43583 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c4d733e200a54143a2e3726805cad733 2024-11-16T11:37:44,464 DEBUG [M:0;a7948fca2832:43583 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f8b13a2ed0e347d2a14d1095324973cc as hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f8b13a2ed0e347d2a14d1095324973cc 2024-11-16T11:37:44,469 INFO [M:0;a7948fca2832:43583 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f8b13a2ed0e347d2a14d1095324973cc, entries=8, sequenceid=121, filesize=5.5 K 2024-11-16T11:37:44,470 DEBUG [M:0;a7948fca2832:43583 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/794e313cda5146a9b007acda47bba9f1 as hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/794e313cda5146a9b007acda47bba9f1 2024-11-16T11:37:44,475 INFO [M:0;a7948fca2832:43583 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 794e313cda5146a9b007acda47bba9f1 2024-11-16T11:37:44,475 INFO [M:0;a7948fca2832:43583 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/794e313cda5146a9b007acda47bba9f1, entries=14, sequenceid=121, filesize=7.6 K 2024-11-16T11:37:44,477 DEBUG [M:0;a7948fca2832:43583 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf071edb8b7f4f05ba3511298c7d620c as hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cf071edb8b7f4f05ba3511298c7d620c 2024-11-16T11:37:44,482 INFO [M:0;a7948fca2832:43583 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cf071edb8b7f4f05ba3511298c7d620c, entries=1, sequenceid=121, filesize=5.0 K 2024-11-16T11:37:44,483 DEBUG [M:0;a7948fca2832:43583 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c4d733e200a54143a2e3726805cad733 as hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c4d733e200a54143a2e3726805cad733 2024-11-16T11:37:44,488 INFO [M:0;a7948fca2832:43583 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33487/user/jenkins/test-data/17641dd4-2cfc-c70d-927d-f8c4e6206467/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c4d733e200a54143a2e3726805cad733, entries=1, sequenceid=121, filesize=4.9 K 2024-11-16T11:37:44,489 INFO [M:0;a7948fca2832:43583 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=121, compaction requested=false 2024-11-16T11:37:44,490 INFO [M:0;a7948fca2832:43583 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T11:37:44,490 DEBUG [M:0;a7948fca2832:43583 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731757064347Disabling compacts and flushes for region at 1731757064347Disabling writes for close at 1731757064347Obtaining lock to block concurrent updates at 1731757064348 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731757064348Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44638, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1731757064348Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731757064349 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731757064349Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731757064368 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731757064368Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731757064383 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731757064398 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731757064398Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731757064410 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731757064428 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731757064428Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731757064438 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731757064453 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731757064453Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d03bf96: reopening flushed file at 1731757064463 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35280270: reopening flushed file at 1731757064469 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12ef8eed: reopening flushed file at 1731757064476 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@790ee328: reopening flushed file at 1731757064482 (+6 ms)Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=121, compaction requested=false at 1731757064489 (+7 ms)Writing region close event to WAL at 1731757064490 (+1 ms)Closed at 1731757064490 2024-11-16T11:37:44,491 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,491 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,491 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,491 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,491 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:37:44,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35797 is added to blk_1073741830_1006 (size=53035) 2024-11-16T11:37:44,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40361 is added to blk_1073741830_1006 (size=53035) 2024-11-16T11:37:44,493 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T11:37:44,493 INFO [M:0;a7948fca2832:43583 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T11:37:44,494 INFO [M:0;a7948fca2832:43583 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43583 2024-11-16T11:37:44,494 INFO [M:0;a7948fca2832:43583 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T11:37:44,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:37:44,602 INFO [M:0;a7948fca2832:43583 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T11:37:44,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43583-0x101436ef0d00000, quorum=127.0.0.1:63364, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:37:44,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:44,637 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f25864e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:37:44,637 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@117a5d75{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:37:44,637 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:37:44,638 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fcb7d58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:37:44,638 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f07a5bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/hadoop.log.dir/,STOPPED} 2024-11-16T11:37:44,639 WARN [BP-161704216-172.17.0.2-1731757009739 heartbeating to localhost/127.0.0.1:33487 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:37:44,639 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T11:37:44,639 WARN [BP-161704216-172.17.0.2-1731757009739 heartbeating to localhost/127.0.0.1:33487 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-161704216-172.17.0.2-1731757009739 (Datanode Uuid 342acf04-c7ac-413c-9876-4a722480f6f5) service to localhost/127.0.0.1:33487 2024-11-16T11:37:44,639 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:37:44,640 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/cluster_5c6bb902-580c-7056-c178-10469046893e/data/data3/current/BP-161704216-172.17.0.2-1731757009739 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:37:44,640 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/cluster_5c6bb902-580c-7056-c178-10469046893e/data/data4/current/BP-161704216-172.17.0.2-1731757009739 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:37:44,640 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:37:44,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:44,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35783ebf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:37:44,643 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5acc7ef{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:37:44,643 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:37:44,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21e05b91{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:37:44,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e47a0e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/hadoop.log.dir/,STOPPED} 2024-11-16T11:37:44,645 WARN [BP-161704216-172.17.0.2-1731757009739 heartbeating to localhost/127.0.0.1:33487 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:37:44,645 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T11:37:44,645 WARN [BP-161704216-172.17.0.2-1731757009739 heartbeating to localhost/127.0.0.1:33487 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-161704216-172.17.0.2-1731757009739 (Datanode Uuid 7ba2dbc6-adad-46e9-a509-620c83b55fd6) service to localhost/127.0.0.1:33487 2024-11-16T11:37:44,645 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:37:44,645 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/cluster_5c6bb902-580c-7056-c178-10469046893e/data/data1/current/BP-161704216-172.17.0.2-1731757009739 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:37:44,646 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/cluster_5c6bb902-580c-7056-c178-10469046893e/data/data2/current/BP-161704216-172.17.0.2-1731757009739 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:37:44,646 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:37:44,651 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7528c100{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T11:37:44,652 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20675422{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:37:44,652 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:37:44,652 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20104ad5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:37:44,652 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@558e6a7f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/hadoop.log.dir/,STOPPED} 2024-11-16T11:37:44,659 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T11:37:44,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T11:37:44,689 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:33487 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33487 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:33487 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33487 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:33487 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:33487 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: regionserver/a7948fca2832:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33487 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33487 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=236 (was 250), ProcessCount=11 (was 11), AvailableMemoryMB=3671 (was 3792) 2024-11-16T11:37:44,696 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=236, ProcessCount=11, AvailableMemoryMB=3671 2024-11-16T11:37:44,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T11:37:44,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/hadoop.log.dir so I do NOT create it in target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2 2024-11-16T11:37:44,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/156b2b8f-e671-3266-c58b-f6a1fa57ad57/hadoop.tmp.dir so I do NOT create it in target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2 2024-11-16T11:37:44,696 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/cluster_ef38439f-ba15-e39a-5a9d-101dc9ba3f7f, deleteOnExit=true 2024-11-16T11:37:44,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T11:37:44,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/test.cache.data in system properties and HBase conf 2024-11-16T11:37:44,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T11:37:44,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/hadoop.log.dir in system properties and HBase conf 2024-11-16T11:37:44,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T11:37:44,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T11:37:44,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T11:37:44,697 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-16T11:37:44,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T11:37:44,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T11:37:44,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T11:37:44,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T11:37:44,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T11:37:44,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T11:37:44,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T11:37:44,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T11:37:44,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T11:37:44,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/nfs.dump.dir in system properties and HBase conf 2024-11-16T11:37:44,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/java.io.tmpdir in system properties and HBase conf 2024-11-16T11:37:44,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T11:37:44,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T11:37:44,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T11:37:44,711 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T11:37:44,927 INFO [regionserver/a7948fca2832:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T11:37:45,040 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:37:45,044 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:37:45,046 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:37:45,046 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:37:45,046 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:37:45,046 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:37:45,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@456bd1e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:37:45,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fb22e39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:37:45,161 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52f2ae6f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/java.io.tmpdir/jetty-localhost-33315-hadoop-hdfs-3_4_1-tests_jar-_-any-9603950474358927452/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T11:37:45,162 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d3fac28{HTTP/1.1, (http/1.1)}{localhost:33315} 2024-11-16T11:37:45,162 INFO [Time-limited test {}] server.Server(415): Started @250384ms 2024-11-16T11:37:45,176 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T11:37:45,465 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:37:45,469 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:37:45,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:37:45,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:37:45,470 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T11:37:45,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@645803e5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:37:45,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f79190{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:37:45,572 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4d3496cb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/java.io.tmpdir/jetty-localhost-34297-hadoop-hdfs-3_4_1-tests_jar-_-any-763277884078880473/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:37:45,573 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ce23f2e{HTTP/1.1, (http/1.1)}{localhost:34297} 2024-11-16T11:37:45,573 INFO [Time-limited test {}] server.Server(415): Started @250795ms 2024-11-16T11:37:45,574 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:37:45,599 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:37:45,602 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:37:45,602 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:37:45,602 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:37:45,602 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:37:45,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49e2f900{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:37:45,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ea6ab9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:37:45,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:45,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:45,705 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55f5baef{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/java.io.tmpdir/jetty-localhost-41219-hadoop-hdfs-3_4_1-tests_jar-_-any-2172150458702359986/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:37:45,705 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@737c094c{HTTP/1.1, (http/1.1)}{localhost:41219} 2024-11-16T11:37:45,705 INFO [Time-limited test {}] server.Server(415): Started @250927ms 2024-11-16T11:37:45,706 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-16T11:37:46,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:46,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:46,823 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/cluster_ef38439f-ba15-e39a-5a9d-101dc9ba3f7f/data/data1/current/BP-1741585453-172.17.0.2-1731757064714/current, will proceed with Du for space computation calculation, 2024-11-16T11:37:46,823 WARN [Thread-1973 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/cluster_ef38439f-ba15-e39a-5a9d-101dc9ba3f7f/data/data2/current/BP-1741585453-172.17.0.2-1731757064714/current, will proceed with Du for space computation calculation, 2024-11-16T11:37:46,839 WARN [Thread-1936 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:37:46,841 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x405fc4dc9a12c87c with lease ID 0x5779eaa23f87ef2e: Processing first storage report for DS-b2f05528-596b-43af-8923-f7f1b7584727 from datanode DatanodeRegistration(127.0.0.1:39767, datanodeUuid=b6559877-6e76-4e9c-8c90-ae570e87e06f, infoPort=38709, infoSecurePort=0, ipcPort=39491, storageInfo=lv=-57;cid=testClusterID;nsid=2025052389;c=1731757064714) 2024-11-16T11:37:46,841 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x405fc4dc9a12c87c with lease ID 0x5779eaa23f87ef2e: from storage DS-b2f05528-596b-43af-8923-f7f1b7584727 node DatanodeRegistration(127.0.0.1:39767, datanodeUuid=b6559877-6e76-4e9c-8c90-ae570e87e06f, infoPort=38709, infoSecurePort=0, ipcPort=39491, storageInfo=lv=-57;cid=testClusterID;nsid=2025052389;c=1731757064714), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:37:46,841 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x405fc4dc9a12c87c with lease ID 0x5779eaa23f87ef2e: Processing first storage report for DS-17a3f6fe-52e8-4292-92a7-0ff3fe9f6032 from datanode DatanodeRegistration(127.0.0.1:39767, datanodeUuid=b6559877-6e76-4e9c-8c90-ae570e87e06f, infoPort=38709, infoSecurePort=0, ipcPort=39491, storageInfo=lv=-57;cid=testClusterID;nsid=2025052389;c=1731757064714) 2024-11-16T11:37:46,841 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x405fc4dc9a12c87c with lease ID 0x5779eaa23f87ef2e: from storage DS-17a3f6fe-52e8-4292-92a7-0ff3fe9f6032 node DatanodeRegistration(127.0.0.1:39767, datanodeUuid=b6559877-6e76-4e9c-8c90-ae570e87e06f, infoPort=38709, infoSecurePort=0, ipcPort=39491, storageInfo=lv=-57;cid=testClusterID;nsid=2025052389;c=1731757064714), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T11:37:46,962 WARN [Thread-1983 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/cluster_ef38439f-ba15-e39a-5a9d-101dc9ba3f7f/data/data3/current/BP-1741585453-172.17.0.2-1731757064714/current, will proceed with Du for space computation calculation, 2024-11-16T11:37:46,962 WARN [Thread-1984 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/cluster_ef38439f-ba15-e39a-5a9d-101dc9ba3f7f/data/data4/current/BP-1741585453-172.17.0.2-1731757064714/current, will proceed with Du for space computation calculation, 2024-11-16T11:37:46,979 WARN [Thread-1959 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T11:37:46,981 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e49fdf6b6856e34 with lease ID 0x5779eaa23f87ef2f: Processing first storage report for DS-58c58062-fdc7-4a05-b49a-a22a4ddc07d7 from datanode DatanodeRegistration(127.0.0.1:44187, datanodeUuid=590a83ac-2b74-4e93-8bb6-e132d23538f3, infoPort=40855, infoSecurePort=0, ipcPort=33771, storageInfo=lv=-57;cid=testClusterID;nsid=2025052389;c=1731757064714) 2024-11-16T11:37:46,981 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e49fdf6b6856e34 with lease ID 0x5779eaa23f87ef2f: from storage DS-58c58062-fdc7-4a05-b49a-a22a4ddc07d7 node DatanodeRegistration(127.0.0.1:44187, datanodeUuid=590a83ac-2b74-4e93-8bb6-e132d23538f3, infoPort=40855, infoSecurePort=0, ipcPort=33771, storageInfo=lv=-57;cid=testClusterID;nsid=2025052389;c=1731757064714), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:37:46,981 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e49fdf6b6856e34 with lease ID 0x5779eaa23f87ef2f: Processing first storage report for DS-879a6641-c951-4efa-af66-1bccc9b58b9f from datanode DatanodeRegistration(127.0.0.1:44187, datanodeUuid=590a83ac-2b74-4e93-8bb6-e132d23538f3, infoPort=40855, infoSecurePort=0, ipcPort=33771, storageInfo=lv=-57;cid=testClusterID;nsid=2025052389;c=1731757064714) 2024-11-16T11:37:46,981 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e49fdf6b6856e34 with lease ID 0x5779eaa23f87ef2f: from storage DS-879a6641-c951-4efa-af66-1bccc9b58b9f node DatanodeRegistration(127.0.0.1:44187, datanodeUuid=590a83ac-2b74-4e93-8bb6-e132d23538f3, infoPort=40855, infoSecurePort=0, ipcPort=33771, storageInfo=lv=-57;cid=testClusterID;nsid=2025052389;c=1731757064714), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:37:47,038 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2 2024-11-16T11:37:47,044 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/cluster_ef38439f-ba15-e39a-5a9d-101dc9ba3f7f/zookeeper_0, clientPort=55822, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/cluster_ef38439f-ba15-e39a-5a9d-101dc9ba3f7f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/cluster_ef38439f-ba15-e39a-5a9d-101dc9ba3f7f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T11:37:47,045 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55822 2024-11-16T11:37:47,045 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:37:47,047 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:37:47,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741825_1001 (size=7) 2024-11-16T11:37:47,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741825_1001 (size=7) 2024-11-16T11:37:47,060 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472 with version=8 2024-11-16T11:37:47,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/hbase-staging 2024-11-16T11:37:47,063 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:37:47,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:37:47,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:37:47,063 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:37:47,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:37:47,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:37:47,063 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T11:37:47,064 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:37:47,064 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44903 2024-11-16T11:37:47,066 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44903 connecting to ZooKeeper ensemble=127.0.0.1:55822 2024-11-16T11:37:47,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:449030x0, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T11:37:47,120 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44903-0x101436fc6cc0000 connected 2024-11-16T11:37:47,207 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:37:47,209 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:37:47,212 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:37:47,212 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472, hbase.cluster.distributed=false 2024-11-16T11:37:47,215 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:37:47,215 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44903 2024-11-16T11:37:47,215 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44903 2024-11-16T11:37:47,216 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44903 2024-11-16T11:37:47,216 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44903 2024-11-16T11:37:47,216 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44903 2024-11-16T11:37:47,235 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:37:47,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:37:47,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:37:47,235 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:37:47,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:37:47,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:37:47,236 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T11:37:47,236 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:37:47,236 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45505 2024-11-16T11:37:47,238 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45505 connecting to ZooKeeper ensemble=127.0.0.1:55822 2024-11-16T11:37:47,238 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:37:47,240 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:37:47,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455050x0, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T11:37:47,249 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:455050x0, quorum=127.0.0.1:55822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:37:47,249 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45505-0x101436fc6cc0001 connected 2024-11-16T11:37:47,250 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T11:37:47,250 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T11:37:47,251 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T11:37:47,251 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:37:47,252 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45505 2024-11-16T11:37:47,252 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45505 2024-11-16T11:37:47,252 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45505 2024-11-16T11:37:47,258 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45505 2024-11-16T11:37:47,259 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45505 2024-11-16T11:37:47,271 DEBUG [M:0;a7948fca2832:44903 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a7948fca2832:44903 2024-11-16T11:37:47,271 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a7948fca2832,44903,1731757067063 2024-11-16T11:37:47,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:37:47,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:37:47,281 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/a7948fca2832,44903,1731757067063 2024-11-16T11:37:47,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T11:37:47,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:47,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:47,291 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T11:37:47,292 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a7948fca2832,44903,1731757067063 from backup master directory 2024-11-16T11:37:47,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a7948fca2832,44903,1731757067063 2024-11-16T11:37:47,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:37:47,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:37:47,302 WARN [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
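The ZKWatcher/ZKUtil lines above show the master and region server arming watches on znodes such as /hbase/master and /hbase/running before those nodes exist. This is the standard ZooKeeper exists-with-watch pattern; a minimal sketch against the plain ZooKeeper client (ensemble address and znode names taken from the log, with session handling and retries simplified away, and not using HBase's ZKUtil wrapper) looks like this:

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/**
 * Minimal sketch of the "Set watcher on znode that does not yet exist"
 * pattern from the log above, using the raw ZooKeeper client. Simplified:
 * no retry or reconnect handling.
 */
public final class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55822", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    // exists() registers a one-shot watch even when the node is absent;
    // the watcher fires with NodeCreated once /hbase/master appears.
    Watcher masterWatcher = (WatchedEvent event) ->
        System.out.println("Event on " + event.getPath() + ": " + event.getType());
    if (zk.exists("/hbase/master", masterWatcher) == null) {
      System.out.println("/hbase/master does not exist yet; watch is armed");
    }
    zk.close();
  }
}
```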
2024-11-16T11:37:47,302 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a7948fca2832,44903,1731757067063 2024-11-16T11:37:47,308 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/hbase.id] with ID: 28f14215-9134-4cee-bc10-45eebb08c7d5 2024-11-16T11:37:47,308 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/.tmp/hbase.id 2024-11-16T11:37:47,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741826_1002 (size=42) 2024-11-16T11:37:47,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741826_1002 (size=42) 2024-11-16T11:37:47,314 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/.tmp/hbase.id]:[hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/hbase.id] 2024-11-16T11:37:47,325 INFO [master/a7948fca2832:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:37:47,325 INFO [master/a7948fca2832:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T11:37:47,327 INFO [master/a7948fca2832:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
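The cluster ID bootstrap above writes hbase.id to a .tmp location first and then moves it to its target, the usual HDFS idiom for making a small file appear atomically. A minimal sketch of that write-then-rename pattern using the public FileSystem API (class name, paths and content are illustrative, not the test's actual values or HBase's FSUtils code):

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Sketch of the write-to-temp-then-rename idiom used for hbase.id in the
 * log above. Paths and contents are illustrative only.
 */
public final class ClusterIdFileSketch {
  public static void writeClusterId(Configuration conf, Path rootDir, String clusterId)
      throws Exception {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");

    // Write the full contents to the temporary file first.
    try (FSDataOutputStream out = fs.create(tmp, true /* overwrite */)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // rename() within one HDFS filesystem is atomic at the NameNode,
    // so readers see either no hbase.id or a complete one.
    if (!fs.rename(tmp, target)) {
      throw new IllegalStateException("Failed to move " + tmp + " to " + target);
    }
  }
}
```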
2024-11-16T11:37:47,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:47,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:47,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741827_1003 (size=196) 2024-11-16T11:37:47,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741827_1003 (size=196) 2024-11-16T11:37:47,350 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T11:37:47,351 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T11:37:47,351 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:37:47,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741828_1004 (size=1189) 2024-11-16T11:37:47,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741828_1004 (size=1189) 2024-11-16T11:37:47,358 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store 2024-11-16T11:37:47,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:37:47,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:37:47,364 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:37:47,364 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T11:37:47,364 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:37:47,364 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:37:47,364 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T11:37:47,364 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:37:47,365 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
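The descriptor dumped above for master:store (column families info, proc, rs and state with per-family VERSIONS, BLOOMFILTER, BLOCKSIZE, IN_MEMORY and ROW_INDEX_V1 encoding on info) maps onto the public descriptor builder API. A hedged sketch for a hypothetical table, reproducing only the info- and proc-style settings shown in the log rather than how HBase builds its internal store table:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Sketch of building a descriptor with the same kind of per-family settings
 * the log prints for master:store. The table name is hypothetical.
 */
public final class DescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                  // VERSIONS => '3'
        .setInMemory(true)                                  // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                             // BLOCKSIZE => 8 KB
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setBlockCacheEnabled(true)                         // BLOCKCACHE => 'true'
        .build();

    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)                                  // VERSIONS => '1'
        .setBloomFilterType(BloomType.ROW)
        .build();

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_store"))     // hypothetical table name
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}
```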
2024-11-16T11:37:47,365 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731757067364Disabling compacts and flushes for region at 1731757067364Disabling writes for close at 1731757067364Writing region close event to WAL at 1731757067365 (+1 ms)Closed at 1731757067365 2024-11-16T11:37:47,365 WARN [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/.initializing 2024-11-16T11:37:47,365 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/WALs/a7948fca2832,44903,1731757067063 2024-11-16T11:37:47,368 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C44903%2C1731757067063, suffix=, logDir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/WALs/a7948fca2832,44903,1731757067063, archiveDir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/oldWALs, maxLogs=10 2024-11-16T11:37:47,368 INFO [master/a7948fca2832:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C44903%2C1731757067063.1731757067368 2024-11-16T11:37:47,373 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/WALs/a7948fca2832,44903,1731757067063/a7948fca2832%2C44903%2C1731757067063.1731757067368 2024-11-16T11:37:47,374 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38709:38709),(127.0.0.1/127.0.0.1:40855:40855)] 2024-11-16T11:37:47,375 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:37:47,375 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:37:47,375 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:37:47,375 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:37:47,376 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:37:47,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T11:37:47,378 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:47,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:37:47,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:37:47,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T11:37:47,380 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:47,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:37:47,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:37:47,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T11:37:47,382 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:47,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:37:47,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:37:47,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T11:37:47,384 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:47,385 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:37:47,385 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:37:47,386 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:37:47,386 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:37:47,387 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:37:47,388 DEBUG [master/a7948fca2832:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:37:47,388 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T11:37:47,390 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:37:47,392 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:37:47,393 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794102, jitterRate=0.00975307822227478}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T11:37:47,393 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731757067375Initializing all the Stores at 1731757067376 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757067376Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757067376Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757067376Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757067376Cleaning up temporary data from old regions at 1731757067388 (+12 ms)Region opened successfully at 1731757067393 (+5 ms) 2024-11-16T11:37:47,394 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T11:37:47,397 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fc21c32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:37:47,398 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T11:37:47,398 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T11:37:47,398 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T11:37:47,398 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T11:37:47,399 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T11:37:47,399 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T11:37:47,399 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T11:37:47,401 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T11:37:47,402 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T11:37:47,413 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T11:37:47,414 INFO [master/a7948fca2832:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T11:37:47,415 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T11:37:47,424 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T11:37:47,424 INFO [master/a7948fca2832:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T11:37:47,425 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T11:37:47,434 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T11:37:47,436 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T11:37:47,445 DEBUG 
[master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T11:37:47,448 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T11:37:47,459 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T11:37:47,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T11:37:47,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T11:37:47,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:47,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:47,471 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a7948fca2832,44903,1731757067063, sessionid=0x101436fc6cc0000, setting cluster-up flag (Was=false) 2024-11-16T11:37:47,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:47,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:47,522 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T11:37:47,524 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7948fca2832,44903,1731757067063 2024-11-16T11:37:47,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:47,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:47,575 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T11:37:47,576 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7948fca2832,44903,1731757067063 2024-11-16T11:37:47,577 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T11:37:47,579 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T11:37:47,579 INFO [master/a7948fca2832:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T11:37:47,579 INFO [master/a7948fca2832:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T11:37:47,579 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a7948fca2832,44903,1731757067063 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T11:37:47,581 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:37:47,581 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:37:47,581 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:37:47,581 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:37:47,581 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a7948fca2832:0, corePoolSize=10, maxPoolSize=10 2024-11-16T11:37:47,581 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:37:47,581 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:37:47,581 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a7948fca2832:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T11:37:47,582 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731757097582 2024-11-16T11:37:47,582 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T11:37:47,582 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T11:37:47,582 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T11:37:47,582 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T11:37:47,582 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T11:37:47,582 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T11:37:47,583 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:47,583 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T11:37:47,583 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:37:47,583 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T11:37:47,583 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T11:37:47,583 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T11:37:47,583 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T11:37:47,583 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T11:37:47,584 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731757067583,5,FailOnTimeoutGroup] 2024-11-16T11:37:47,584 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731757067584,5,FailOnTimeoutGroup] 2024-11-16T11:37:47,584 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:47,584 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T11:37:47,584 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:47,584 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:47,584 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:47,584 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T11:37:47,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:37:47,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:37:47,592 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T11:37:47,592 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472 2024-11-16T11:37:47,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741832_1008 (size=32) 2024-11-16T11:37:47,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741832_1008 (size=32) 2024-11-16T11:37:47,599 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:37:47,600 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T11:37:47,601 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T11:37:47,601 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:47,602 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:37:47,602 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T11:37:47,603 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T11:37:47,603 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:47,604 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:37:47,604 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T11:37:47,605 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T11:37:47,605 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:47,606 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:37:47,606 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T11:37:47,607 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T11:37:47,607 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:47,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:37:47,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T11:37:47,609 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740 2024-11-16T11:37:47,609 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740 2024-11-16T11:37:47,610 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T11:37:47,610 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T11:37:47,611 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor; using region.getMemStoreFlushHeapSize / # of families (16.0 M) instead. 
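The FlushLargeStoresPolicy line above falls back to the region memstore flush size divided by the number of column families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on hbase:meta. A minimal sketch of that fallback arithmetic and of setting the key explicitly, assuming the standard org.apache.hadoop.conf.Configuration API; the byte values are illustrative, chosen only so the quotient matches the "16.0 M" in the log.

    import org.apache.hadoop.conf.Configuration;

    public class FlushLowerBoundSketch {
      // Key name as it appears in the log message above.
      static final String LOWER_BOUND_KEY = "hbase.hregion.percolumnfamilyflush.size.lower.bound";

      public static void main(String[] args) {
        Configuration conf = new Configuration();

        long memStoreFlushHeapSize = 64L * 1024 * 1024; // illustrative region flush size (64 MB)
        int numFamilies = 4;                            // hbase:meta has info, ns, rep_barrier, table

        // Fallback used when the key is absent: flush size split evenly across families.
        long fallbackLowerBound = memStoreFlushHeapSize / numFamilies; // 16 MB here

        // Setting the key explicitly would make the policy skip the fallback.
        conf.setLong(LOWER_BOUND_KEY, fallbackLowerBound);
        System.out.println("per-family flush lower bound = " + conf.getLong(LOWER_BOUND_KEY, -1));
      }
    }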
2024-11-16T11:37:47,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T11:37:47,614 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:37:47,614 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=875144, jitterRate=0.11280381679534912}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T11:37:47,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731757067599Initializing all the Stores at 1731757067599Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757067599Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757067600 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757067600Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757067600Cleaning up temporary data from old regions at 1731757067610 (+10 ms)Region opened successfully at 1731757067615 (+5 ms) 2024-11-16T11:37:47,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T11:37:47,615 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T11:37:47,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T11:37:47,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T11:37:47,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T11:37:47,615 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T11:37:47,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731757067615Disabling compacts and flushes for region at 1731757067615Disabling writes for close at 1731757067615Writing region close 
event to WAL at 1731757067615Closed at 1731757067615 2024-11-16T11:37:47,616 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:37:47,617 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T11:37:47,617 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T11:37:47,618 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T11:37:47,619 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T11:37:47,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:47,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:47,661 INFO [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(746): ClusterId : 28f14215-9134-4cee-bc10-45eebb08c7d5 2024-11-16T11:37:47,661 DEBUG [RS:0;a7948fca2832:45505 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T11:37:47,675 DEBUG [RS:0;a7948fca2832:45505 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T11:37:47,675 DEBUG [RS:0;a7948fca2832:45505 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T11:37:47,688 DEBUG [RS:0;a7948fca2832:45505 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T11:37:47,688 DEBUG [RS:0;a7948fca2832:45505 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78736565, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:37:47,703 DEBUG [RS:0;a7948fca2832:45505 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a7948fca2832:45505 2024-11-16T11:37:47,703 INFO [RS:0;a7948fca2832:45505 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T11:37:47,703 INFO [RS:0;a7948fca2832:45505 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T11:37:47,703 DEBUG [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T11:37:47,704 INFO [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7948fca2832,44903,1731757067063 with port=45505, startcode=1731757067235 2024-11-16T11:37:47,704 DEBUG [RS:0;a7948fca2832:45505 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T11:37:47,706 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39741, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T11:37:47,707 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44903 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7948fca2832,45505,1731757067235 2024-11-16T11:37:47,707 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44903 {}] master.ServerManager(517): Registering regionserver=a7948fca2832,45505,1731757067235 2024-11-16T11:37:47,708 DEBUG [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472 2024-11-16T11:37:47,708 DEBUG [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36419 2024-11-16T11:37:47,708 DEBUG [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T11:37:47,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:37:47,719 DEBUG [RS:0;a7948fca2832:45505 {}] zookeeper.ZKUtil(111): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Set watcher on 
existing znode=/hbase/rs/a7948fca2832,45505,1731757067235 2024-11-16T11:37:47,719 WARN [RS:0;a7948fca2832:45505 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T11:37:47,719 INFO [RS:0;a7948fca2832:45505 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:37:47,719 DEBUG [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/WALs/a7948fca2832,45505,1731757067235 2024-11-16T11:37:47,720 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7948fca2832,45505,1731757067235] 2024-11-16T11:37:47,723 INFO [RS:0;a7948fca2832:45505 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T11:37:47,724 INFO [RS:0;a7948fca2832:45505 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T11:37:47,725 INFO [RS:0;a7948fca2832:45505 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T11:37:47,725 INFO [RS:0;a7948fca2832:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:47,725 INFO [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T11:37:47,726 INFO [RS:0;a7948fca2832:45505 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T11:37:47,726 INFO [RS:0;a7948fca2832:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
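The ChoreService lines above register periodic tasks by name, period and unit (for example CompactionThroughputTuner every 60000 ms). A rough JDK-only stand-in for that pattern using ScheduledExecutorService; the names and printed message are illustrative and this is not HBase's ChoreService implementation.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        // Mirrors "Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS".
        String name = "CompactionThroughputTuner";
        long periodMillis = 60000;

        scheduler.scheduleAtFixedRate(
            () -> System.out.println("running chore " + name),
            periodMillis, periodMillis, TimeUnit.MILLISECONDS);

        // A real server would keep this running until shutdown; stop it here for the sketch.
        TimeUnit.SECONDS.sleep(1);
        scheduler.shutdownNow();
      }
    }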
2024-11-16T11:37:47,726 DEBUG [RS:0;a7948fca2832:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:37:47,726 DEBUG [RS:0;a7948fca2832:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:37:47,726 DEBUG [RS:0;a7948fca2832:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:37:47,726 DEBUG [RS:0;a7948fca2832:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:37:47,726 DEBUG [RS:0;a7948fca2832:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:37:47,726 DEBUG [RS:0;a7948fca2832:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:37:47,726 DEBUG [RS:0;a7948fca2832:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:37:47,727 DEBUG [RS:0;a7948fca2832:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:37:47,727 DEBUG [RS:0;a7948fca2832:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:37:47,727 DEBUG [RS:0;a7948fca2832:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:37:47,727 DEBUG [RS:0;a7948fca2832:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:37:47,727 DEBUG [RS:0;a7948fca2832:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:37:47,727 DEBUG [RS:0;a7948fca2832:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:37:47,727 DEBUG [RS:0;a7948fca2832:45505 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:37:47,727 INFO [RS:0;a7948fca2832:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:47,727 INFO [RS:0;a7948fca2832:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:47,727 INFO [RS:0;a7948fca2832:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:47,727 INFO [RS:0;a7948fca2832:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
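Each "Starting executor service" line above names a pool together with its corePoolSize and maxPoolSize. A JDK-only approximation of such a named, bounded pool; the thread-factory naming, queue choice and keep-alive are assumptions, not HBase's own ExecutorService class.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorSketch {
      static ThreadPoolExecutor newPool(String name, int corePoolSize, int maxPoolSize) {
        return new ThreadPoolExecutor(
            corePoolSize, maxPoolSize,
            60L, TimeUnit.SECONDS,                  // idle keep-alive, an assumption
            new LinkedBlockingQueue<>(),
            r -> new Thread(r, name + "-worker"));  // name worker threads after the pool
      }

      public static void main(String[] args) {
        // Mirrors "name=RS_OPEN_REGION-regionserver/..., corePoolSize=1, maxPoolSize=1".
        ThreadPoolExecutor openRegionPool = newPool("RS_OPEN_REGION", 1, 1);
        openRegionPool.execute(() -> System.out.println("open-region task"));
        openRegionPool.shutdown();
      }
    }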
2024-11-16T11:37:47,727 INFO [RS:0;a7948fca2832:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:47,727 INFO [RS:0;a7948fca2832:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,45505,1731757067235-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T11:37:47,745 INFO [RS:0;a7948fca2832:45505 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T11:37:47,745 INFO [RS:0;a7948fca2832:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,45505,1731757067235-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:47,745 INFO [RS:0;a7948fca2832:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:47,745 INFO [RS:0;a7948fca2832:45505 {}] regionserver.Replication(171): a7948fca2832,45505,1731757067235 started 2024-11-16T11:37:47,760 INFO [RS:0;a7948fca2832:45505 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:47,760 INFO [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(1482): Serving as a7948fca2832,45505,1731757067235, RpcServer on a7948fca2832/172.17.0.2:45505, sessionid=0x101436fc6cc0001 2024-11-16T11:37:47,761 DEBUG [RS:0;a7948fca2832:45505 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T11:37:47,761 DEBUG [RS:0;a7948fca2832:45505 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7948fca2832,45505,1731757067235 2024-11-16T11:37:47,761 DEBUG [RS:0;a7948fca2832:45505 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,45505,1731757067235' 2024-11-16T11:37:47,761 DEBUG [RS:0;a7948fca2832:45505 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T11:37:47,761 DEBUG [RS:0;a7948fca2832:45505 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T11:37:47,761 DEBUG [RS:0;a7948fca2832:45505 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T11:37:47,762 DEBUG [RS:0;a7948fca2832:45505 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T11:37:47,762 DEBUG [RS:0;a7948fca2832:45505 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a7948fca2832,45505,1731757067235 2024-11-16T11:37:47,762 DEBUG [RS:0;a7948fca2832:45505 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,45505,1731757067235' 2024-11-16T11:37:47,762 DEBUG [RS:0;a7948fca2832:45505 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T11:37:47,762 DEBUG [RS:0;a7948fca2832:45505 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T11:37:47,762 DEBUG [RS:0;a7948fca2832:45505 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T11:37:47,762 INFO [RS:0;a7948fca2832:45505 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T11:37:47,762 INFO [RS:0;a7948fca2832:45505 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-16T11:37:47,770 WARN [a7948fca2832:44903 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T11:37:47,865 INFO [RS:0;a7948fca2832:45505 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C45505%2C1731757067235, suffix=, logDir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/WALs/a7948fca2832,45505,1731757067235, archiveDir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/oldWALs, maxLogs=32 2024-11-16T11:37:47,865 INFO [RS:0;a7948fca2832:45505 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C45505%2C1731757067235.1731757067865 2024-11-16T11:37:47,873 INFO [RS:0;a7948fca2832:45505 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/WALs/a7948fca2832,45505,1731757067235/a7948fca2832%2C45505%2C1731757067235.1731757067865 2024-11-16T11:37:47,874 DEBUG [RS:0;a7948fca2832:45505 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38709:38709),(127.0.0.1/127.0.0.1:40855:40855)] 2024-11-16T11:37:48,020 DEBUG [a7948fca2832:44903 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T11:37:48,021 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a7948fca2832,45505,1731757067235 2024-11-16T11:37:48,022 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7948fca2832,45505,1731757067235, state=OPENING 2024-11-16T11:37:48,070 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T11:37:48,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:48,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:37:48,081 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:37:48,081 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:37:48,081 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T11:37:48,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a7948fca2832,45505,1731757067235}] 2024-11-16T11:37:48,235 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T11:37:48,237 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39129, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T11:37:48,241 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T11:37:48,241 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:37:48,244 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C45505%2C1731757067235.meta, suffix=.meta, logDir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/WALs/a7948fca2832,45505,1731757067235, archiveDir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/oldWALs, maxLogs=32 2024-11-16T11:37:48,245 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C45505%2C1731757067235.meta.1731757068245.meta 2024-11-16T11:37:48,249 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/WALs/a7948fca2832,45505,1731757067235/a7948fca2832%2C45505%2C1731757067235.meta.1731757068245.meta 2024-11-16T11:37:48,250 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40855:40855),(127.0.0.1/127.0.0.1:38709:38709)] 2024-11-16T11:37:48,251 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:37:48,251 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T11:37:48,251 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T11:37:48,251 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
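The RS_OPEN_META handler above loads the meta coprocessor from the table descriptor attribute '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|' with priority 536870911. Below is a small, assumption-laden parser for that '|'-separated value using only the JDK; the field order (jar path, class name, priority, optional args) is inferred from the log value and this is not HBase's own parser.

    public class CoprocessorSpecSketch {
      public static void main(String[] args) {
        // Attribute value copied from the hbase:meta table descriptor in the log above.
        String spec = "|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|";

        // Fields are '|'-separated: jar path (empty = on the classpath), class name, priority, optional args.
        String[] parts = spec.split("\\|", -1);
        String jarPath   = parts[0];
        String className = parts[1];
        int priority     = Integer.parseInt(parts[2]);

        System.out.printf("path=%s class=%s priority=%d%n",
            jarPath.isEmpty() ? "<classpath>" : jarPath, className, priority);
      }
    }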
2024-11-16T11:37:48,251 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T11:37:48,251 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:37:48,251 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T11:37:48,251 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T11:37:48,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T11:37:48,253 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T11:37:48,253 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:48,254 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:37:48,254 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T11:37:48,255 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T11:37:48,255 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:48,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:37:48,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T11:37:48,256 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T11:37:48,256 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:48,256 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:37:48,256 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T11:37:48,257 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T11:37:48,257 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:48,257 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
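The CompactionConfiguration lines above repeat the same knobs for every column family of the meta region: minFilesToCompact=3, maxFilesToCompact=10, ratio=1.2. The sketch below shows the classic ratio test such size-based policies are built around (a file is a compaction candidate when it is no larger than ratio times the sum of the smaller files); it is a simplified illustration, not ExploringCompactionPolicy itself, and the file sizes are made up.

    import java.util.List;

    public class RatioCompactionSketch {
      /** True when sortedSizesDesc[i] <= ratio * (sum of the files after i); sizes sorted largest first. */
      static boolean fitsRatio(List<Long> sortedSizesDesc, int i, double ratio) {
        long sumSmaller = 0;
        for (int j = i + 1; j < sortedSizesDesc.size(); j++) {
          sumSmaller += sortedSizesDesc.get(j);
        }
        return sortedSizesDesc.get(i) <= ratio * sumSmaller;
      }

      public static void main(String[] args) {
        List<Long> sizes = List.of(100L, 40L, 35L, 30L); // illustrative store file sizes, bytes
        double ratio = 1.2;                              // matches "ratio 1.200000" in the log
        for (int i = 0; i < sizes.size(); i++) {
          System.out.println(sizes.get(i) + " fits ratio: " + fitsRatio(sizes, i, ratio));
        }
      }
    }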
2024-11-16T11:37:48,257 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T11:37:48,258 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740 2024-11-16T11:37:48,259 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740 2024-11-16T11:37:48,260 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T11:37:48,260 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T11:37:48,260 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T11:37:48,262 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T11:37:48,262 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794159, jitterRate=0.009826228022575378}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T11:37:48,262 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T11:37:48,263 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731757068252Writing region info on filesystem at 1731757068252Initializing all the Stores at 1731757068252Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757068252Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757068252Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757068252Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757068253 (+1 ms)Cleaning up temporary data from old regions at 1731757068260 (+7 ms)Running coprocessor post-open hooks at 1731757068262 (+2 ms)Region opened successfully at 1731757068263 (+1 ms) 2024-11-16T11:37:48,264 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731757068235 2024-11-16T11:37:48,266 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T11:37:48,266 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T11:37:48,267 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a7948fca2832,45505,1731757067235 2024-11-16T11:37:48,268 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7948fca2832,45505,1731757067235, state=OPEN 2024-11-16T11:37:48,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T11:37:48,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T11:37:48,309 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a7948fca2832,45505,1731757067235 2024-11-16T11:37:48,309 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:37:48,309 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:37:48,312 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T11:37:48,312 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a7948fca2832,45505,1731757067235 in 228 msec 2024-11-16T11:37:48,314 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T11:37:48,314 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 695 msec 2024-11-16T11:37:48,314 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:37:48,314 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T11:37:48,316 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T11:37:48,316 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7948fca2832,45505,1731757067235, seqNum=-1] 2024-11-16T11:37:48,317 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T11:37:48,318 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33107, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T11:37:48,324 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 744 msec 2024-11-16T11:37:48,324 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731757068324, completionTime=-1 2024-11-16T11:37:48,324 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T11:37:48,324 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T11:37:48,326 INFO [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T11:37:48,326 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731757128326 2024-11-16T11:37:48,326 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731757188326 2024-11-16T11:37:48,326 INFO [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T11:37:48,326 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,44903,1731757067063-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:48,326 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,44903,1731757067063-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:48,326 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,44903,1731757067063-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:48,326 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a7948fca2832:44903, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T11:37:48,326 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:48,327 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T11:37:48,328 DEBUG [master/a7948fca2832:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T11:37:48,330 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.028sec 2024-11-16T11:37:48,330 INFO [master/a7948fca2832:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T11:37:48,330 INFO [master/a7948fca2832:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T11:37:48,330 INFO [master/a7948fca2832:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T11:37:48,330 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T11:37:48,330 INFO [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T11:37:48,331 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,44903,1731757067063-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T11:37:48,331 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,44903,1731757067063-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T11:37:48,333 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T11:37:48,333 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T11:37:48,333 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,44903,1731757067063-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T11:37:48,361 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4eb83964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:37:48,361 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a7948fca2832,44903,-1 for getting cluster id 2024-11-16T11:37:48,361 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T11:37:48,362 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '28f14215-9134-4cee-bc10-45eebb08c7d5' 2024-11-16T11:37:48,363 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T11:37:48,363 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "28f14215-9134-4cee-bc10-45eebb08c7d5" 2024-11-16T11:37:48,363 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fefeea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:37:48,363 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a7948fca2832,44903,-1] 2024-11-16T11:37:48,363 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T11:37:48,364 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:37:48,365 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49684, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T11:37:48,365 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e8b29da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:37:48,366 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T11:37:48,367 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7948fca2832,45505,1731757067235, seqNum=-1] 2024-11-16T11:37:48,367 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T11:37:48,368 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50670, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T11:37:48,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a7948fca2832,44903,1731757067063 2024-11-16T11:37:48,370 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:37:48,372 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T11:37:48,372 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T11:37:48,373 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is a7948fca2832,44903,1731757067063 2024-11-16T11:37:48,373 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3f98ea4a 2024-11-16T11:37:48,374 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T11:37:48,375 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49690, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T11:37:48,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44903 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T11:37:48,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44903 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-16T11:37:48,375 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44903 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T11:37:48,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44903 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-16T11:37:48,378 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T11:37:48,378 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:48,378 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44903 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-16T11:37:48,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44903 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T11:37:48,379 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T11:37:48,385 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741835_1011 (size=381) 2024-11-16T11:37:48,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741835_1011 (size=381) 2024-11-16T11:37:48,387 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ffee239c077be3582f21707b18f768b4, NAME => 'TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472 2024-11-16T11:37:48,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741836_1012 (size=64) 2024-11-16T11:37:48,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741836_1012 (size=64) 2024-11-16T11:37:48,394 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:37:48,394 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing ffee239c077be3582f21707b18f768b4, disabling compactions & flushes 2024-11-16T11:37:48,394 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 2024-11-16T11:37:48,394 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 2024-11-16T11:37:48,394 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. after waiting 0 ms 2024-11-16T11:37:48,394 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 2024-11-16T11:37:48,394 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 
2024-11-16T11:37:48,394 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for ffee239c077be3582f21707b18f768b4: Waiting for close lock at 1731757068394Disabling compacts and flushes for region at 1731757068394Disabling writes for close at 1731757068394Writing region close event to WAL at 1731757068394Closed at 1731757068394 2024-11-16T11:37:48,395 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T11:37:48,396 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731757068395"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731757068395"}]},"ts":"1731757068395"} 2024-11-16T11:37:48,398 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-16T11:37:48,399 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T11:37:48,400 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731757068399"}]},"ts":"1731757068399"} 2024-11-16T11:37:48,402 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-16T11:37:48,402 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ffee239c077be3582f21707b18f768b4, ASSIGN}] 2024-11-16T11:37:48,404 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ffee239c077be3582f21707b18f768b4, ASSIGN 2024-11-16T11:37:48,405 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ffee239c077be3582f21707b18f768b4, ASSIGN; state=OFFLINE, location=a7948fca2832,45505,1731757067235; forceNewPlan=false, retain=false 2024-11-16T11:37:48,556 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ffee239c077be3582f21707b18f768b4, regionState=OPENING, regionLocation=a7948fca2832,45505,1731757067235 2024-11-16T11:37:48,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ffee239c077be3582f21707b18f768b4, ASSIGN because future has completed 2024-11-16T11:37:48,559 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ffee239c077be3582f21707b18f768b4, 
server=a7948fca2832,45505,1731757067235}] 2024-11-16T11:37:48,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:48,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:48,715 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 2024-11-16T11:37:48,715 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ffee239c077be3582f21707b18f768b4, NAME => 'TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:37:48,715 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling ffee239c077be3582f21707b18f768b4 2024-11-16T11:37:48,715 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:37:48,716 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ffee239c077be3582f21707b18f768b4 2024-11-16T11:37:48,716 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ffee239c077be3582f21707b18f768b4 2024-11-16T11:37:48,717 INFO [StoreOpener-ffee239c077be3582f21707b18f768b4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ffee239c077be3582f21707b18f768b4 2024-11-16T11:37:48,718 INFO [StoreOpener-ffee239c077be3582f21707b18f768b4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ffee239c077be3582f21707b18f768b4 columnFamilyName info 2024-11-16T11:37:48,718 DEBUG [StoreOpener-ffee239c077be3582f21707b18f768b4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:37:48,719 INFO [StoreOpener-ffee239c077be3582f21707b18f768b4-1 {}] regionserver.HStore(327): Store=ffee239c077be3582f21707b18f768b4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:37:48,719 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ffee239c077be3582f21707b18f768b4 2024-11-16T11:37:48,720 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4 2024-11-16T11:37:48,720 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4 2024-11-16T11:37:48,720 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ffee239c077be3582f21707b18f768b4 2024-11-16T11:37:48,720 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ffee239c077be3582f21707b18f768b4 2024-11-16T11:37:48,722 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ffee239c077be3582f21707b18f768b4 2024-11-16T11:37:48,724 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:37:48,724 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ffee239c077be3582f21707b18f768b4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735660, jitterRate=-0.06456071138381958}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T11:37:48,725 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ffee239c077be3582f21707b18f768b4 2024-11-16T11:37:48,725 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ffee239c077be3582f21707b18f768b4: Running coprocessor pre-open hook at 
1731757068716Writing region info on filesystem at 1731757068716Initializing all the Stores at 1731757068716Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757068717 (+1 ms)Cleaning up temporary data from old regions at 1731757068720 (+3 ms)Running coprocessor post-open hooks at 1731757068725 (+5 ms)Region opened successfully at 1731757068725 2024-11-16T11:37:48,726 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4., pid=6, masterSystemTime=1731757068711 2024-11-16T11:37:48,729 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 2024-11-16T11:37:48,729 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 2024-11-16T11:37:48,730 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ffee239c077be3582f21707b18f768b4, regionState=OPEN, openSeqNum=2, regionLocation=a7948fca2832,45505,1731757067235 2024-11-16T11:37:48,732 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ffee239c077be3582f21707b18f768b4, server=a7948fca2832,45505,1731757067235 because future has completed 2024-11-16T11:37:48,737 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T11:37:48,737 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ffee239c077be3582f21707b18f768b4, server=a7948fca2832,45505,1731757067235 in 175 msec 2024-11-16T11:37:48,741 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T11:37:48,741 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ffee239c077be3582f21707b18f768b4, ASSIGN in 335 msec 2024-11-16T11:37:48,742 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T11:37:48,742 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731757068742"}]},"ts":"1731757068742"} 2024-11-16T11:37:48,745 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-16T11:37:48,746 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; 
CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T11:37:48,748 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 370 msec 2024-11-16T11:37:49,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,161 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:49,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:49,672 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T11:37:49,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,674 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,674 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,674 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,675 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,676 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:49,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:50,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:50,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:51,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-16T11:37:51,137 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T11:37:51,138 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T11:37:51,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:51,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:52,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:52,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:53,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:53,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:53,723 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T11:37:53,724 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-16T11:37:54,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:54,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:55,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:55,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:56,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:56,640 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T11:37:56,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,643 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:56,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,687 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:56,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:37:57,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:37:57,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:58,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44903 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T11:37:58,427 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-16T11:37:58,427 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-16T11:37:58,435 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-16T11:37:58,435 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 
2024-11-16T11:37:58,438 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4., hostname=a7948fca2832,45505,1731757067235, seqNum=2] 2024-11-16T11:37:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on ffee239c077be3582f21707b18f768b4 2024-11-16T11:37:58,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffee239c077be3582f21707b18f768b4 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T11:37:58,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/a326a7400843409fa5b64bea509629f8 is 1080, key is row0001/info:/1731757078439/Put/seqid=0 2024-11-16T11:37:58,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741837_1013 (size=12509) 2024-11-16T11:37:58,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741837_1013 (size=12509) 2024-11-16T11:37:58,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/a326a7400843409fa5b64bea509629f8 2024-11-16T11:37:58,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/a326a7400843409fa5b64bea509629f8 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/a326a7400843409fa5b64bea509629f8 2024-11-16T11:37:58,496 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/a326a7400843409fa5b64bea509629f8, entries=7, sequenceid=11, filesize=12.2 K 2024-11-16T11:37:58,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for ffee239c077be3582f21707b18f768b4 in 42ms, sequenceid=11, compaction requested=false 2024-11-16T11:37:58,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffee239c077be3582f21707b18f768b4: 2024-11-16T11:37:58,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on ffee239c077be3582f21707b18f768b4 2024-11-16T11:37:58,499 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffee239c077be3582f21707b18f768b4 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-11-16T11:37:58,503 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/91076785a1574190b73c41fd0dd7c34e is 1080, key is row0008/info:/1731757078457/Put/seqid=0 2024-11-16T11:37:58,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741838_1014 (size=26530) 2024-11-16T11:37:58,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741838_1014 (size=26530) 2024-11-16T11:37:58,508 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/91076785a1574190b73c41fd0dd7c34e 2024-11-16T11:37:58,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/91076785a1574190b73c41fd0dd7c34e as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91076785a1574190b73c41fd0dd7c34e 2024-11-16T11:37:58,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91076785a1574190b73c41fd0dd7c34e, entries=20, sequenceid=34, filesize=25.9 K 2024-11-16T11:37:58,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=5.25 KB/5380 for ffee239c077be3582f21707b18f768b4 in 22ms, sequenceid=34, compaction requested=false 2024-11-16T11:37:58,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffee239c077be3582f21707b18f768b4: 2024-11-16T11:37:58,521 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=38.1 K, sizeToCheck=16.0 K 2024-11-16T11:37:58,521 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:37:58,521 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91076785a1574190b73c41fd0dd7c34e because midkey is the same as first or last row 2024-11-16T11:37:58,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:58,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:59,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:37:59,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:00,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on ffee239c077be3582f21707b18f768b4 2024-11-16T11:38:00,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffee239c077be3582f21707b18f768b4 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T11:38:00,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/7f06b8fced6c4c5cbafbca7d60c8c143 is 1080, key is row0028/info:/1731757078500/Put/seqid=0 2024-11-16T11:38:00,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741839_1015 (size=12509) 2024-11-16T11:38:00,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741839_1015 (size=12509) 2024-11-16T11:38:00,523 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/7f06b8fced6c4c5cbafbca7d60c8c143 2024-11-16T11:38:00,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/7f06b8fced6c4c5cbafbca7d60c8c143 as 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/7f06b8fced6c4c5cbafbca7d60c8c143 2024-11-16T11:38:00,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/7f06b8fced6c4c5cbafbca7d60c8c143, entries=7, sequenceid=44, filesize=12.2 K 2024-11-16T11:38:00,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9684 for ffee239c077be3582f21707b18f768b4 in 23ms, sequenceid=44, compaction requested=true 2024-11-16T11:38:00,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffee239c077be3582f21707b18f768b4: 2024-11-16T11:38:00,536 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=50.3 K, sizeToCheck=16.0 K 2024-11-16T11:38:00,536 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:38:00,537 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91076785a1574190b73c41fd0dd7c34e because midkey is the same as first or last row 2024-11-16T11:38:00,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ffee239c077be3582f21707b18f768b4:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T11:38:00,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:00,537 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T11:38:00,538 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 51548 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T11:38:00,538 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1541): ffee239c077be3582f21707b18f768b4/info is initiating minor compaction (all files) 2024-11-16T11:38:00,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on ffee239c077be3582f21707b18f768b4 2024-11-16T11:38:00,538 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ffee239c077be3582f21707b18f768b4/info in TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 
2024-11-16T11:38:00,538 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/a326a7400843409fa5b64bea509629f8, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91076785a1574190b73c41fd0dd7c34e, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/7f06b8fced6c4c5cbafbca7d60c8c143] into tmpdir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp, totalSize=50.3 K 2024-11-16T11:38:00,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffee239c077be3582f21707b18f768b4 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T11:38:00,539 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting a326a7400843409fa5b64bea509629f8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731757078439 2024-11-16T11:38:00,539 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 91076785a1574190b73c41fd0dd7c34e, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731757078457 2024-11-16T11:38:00,539 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7f06b8fced6c4c5cbafbca7d60c8c143, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1731757078500 2024-11-16T11:38:00,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/91f456cf1ec54ae091fba81ca465fd45 is 1080, key is row0035/info:/1731757080514/Put/seqid=0 2024-11-16T11:38:00,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741840_1016 (size=16817) 2024-11-16T11:38:00,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741840_1016 (size=16817) 2024-11-16T11:38:00,554 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/91f456cf1ec54ae091fba81ca465fd45 2024-11-16T11:38:00,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/91f456cf1ec54ae091fba81ca465fd45 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91f456cf1ec54ae091fba81ca465fd45 2024-11-16T11:38:00,568 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): ffee239c077be3582f21707b18f768b4#info#compaction#61 average throughput is 17.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:38:00,569 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/82252b7cc8124e098013f6399ce0e129 is 1080, key is row0001/info:/1731757078439/Put/seqid=0 2024-11-16T11:38:00,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91f456cf1ec54ae091fba81ca465fd45, entries=11, sequenceid=58, filesize=16.4 K 2024-11-16T11:38:00,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=17.86 KB/18292 for ffee239c077be3582f21707b18f768b4 in 34ms, sequenceid=58, compaction requested=false 2024-11-16T11:38:00,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffee239c077be3582f21707b18f768b4: 2024-11-16T11:38:00,572 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.8 K, sizeToCheck=16.0 K 2024-11-16T11:38:00,572 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:38:00,572 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91076785a1574190b73c41fd0dd7c34e because midkey is the same as first or last row 2024-11-16T11:38:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on ffee239c077be3582f21707b18f768b4 2024-11-16T11:38:00,573 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffee239c077be3582f21707b18f768b4 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-16T11:38:00,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741841_1017 (size=41747) 2024-11-16T11:38:00,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741841_1017 (size=41747) 2024-11-16T11:38:00,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/5e18b816cda94ca2a4e8f07c2f8a8dc1 is 1080, key is row0046/info:/1731757080539/Put/seqid=0 2024-11-16T11:38:00,580 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/82252b7cc8124e098013f6399ce0e129 as 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/82252b7cc8124e098013f6399ce0e129 2024-11-16T11:38:00,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741842_1018 (size=24376) 2024-11-16T11:38:00,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741842_1018 (size=24376) 2024-11-16T11:38:00,586 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ffee239c077be3582f21707b18f768b4/info of ffee239c077be3582f21707b18f768b4 into 82252b7cc8124e098013f6399ce0e129(size=40.8 K), total size for store is 57.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T11:38:00,586 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ffee239c077be3582f21707b18f768b4: 2024-11-16T11:38:00,586 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4., storeName=ffee239c077be3582f21707b18f768b4/info, priority=13, startTime=1731757080537; duration=0sec 2024-11-16T11:38:00,586 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K 2024-11-16T11:38:00,586 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:38:00,586 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/82252b7cc8124e098013f6399ce0e129 because midkey is the same as first or last row 2024-11-16T11:38:00,586 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K 2024-11-16T11:38:00,586 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:38:00,586 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/82252b7cc8124e098013f6399ce0e129 because midkey is the same as first or last row 2024-11-16T11:38:00,586 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K 2024-11-16T11:38:00,586 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:38:00,587 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/82252b7cc8124e098013f6399ce0e129 because midkey is the same as first or last row 2024-11-16T11:38:00,587 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:00,587 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ffee239c077be3582f21707b18f768b4:info 2024-11-16T11:38:00,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:00,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:00,984 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/5e18b816cda94ca2a4e8f07c2f8a8dc1 2024-11-16T11:38:00,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/5e18b816cda94ca2a4e8f07c2f8a8dc1 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/5e18b816cda94ca2a4e8f07c2f8a8dc1 2024-11-16T11:38:01,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/5e18b816cda94ca2a4e8f07c2f8a8dc1, entries=18, sequenceid=79, filesize=23.8 K 2024-11-16T11:38:01,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=1.05 KB/1076 for ffee239c077be3582f21707b18f768b4 in 432ms, sequenceid=79, compaction requested=true 2024-11-16T11:38:01,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffee239c077be3582f21707b18f768b4: 2024-11-16T11:38:01,006 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=81.0 K, sizeToCheck=16.0 K 2024-11-16T11:38:01,006 DEBUG 
[MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:38:01,006 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/82252b7cc8124e098013f6399ce0e129 because midkey is the same as first or last row 2024-11-16T11:38:01,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ffee239c077be3582f21707b18f768b4:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T11:38:01,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:01,006 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T11:38:01,007 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 82940 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T11:38:01,007 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1541): ffee239c077be3582f21707b18f768b4/info is initiating minor compaction (all files) 2024-11-16T11:38:01,007 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ffee239c077be3582f21707b18f768b4/info in TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 2024-11-16T11:38:01,008 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/82252b7cc8124e098013f6399ce0e129, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91f456cf1ec54ae091fba81ca465fd45, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/5e18b816cda94ca2a4e8f07c2f8a8dc1] into tmpdir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp, totalSize=81.0 K 2024-11-16T11:38:01,008 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 82252b7cc8124e098013f6399ce0e129, keycount=34, bloomtype=ROW, size=40.8 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1731757078439 2024-11-16T11:38:01,009 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 91f456cf1ec54ae091fba81ca465fd45, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1731757080514 2024-11-16T11:38:01,009 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5e18b816cda94ca2a4e8f07c2f8a8dc1, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731757080539 2024-11-16T11:38:01,024 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 
{}] throttle.PressureAwareThroughputController(145): ffee239c077be3582f21707b18f768b4#info#compaction#63 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:38:01,025 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/04f0aca1348143449c818a04ba5ff9c5 is 1080, key is row0001/info:/1731757078439/Put/seqid=0 2024-11-16T11:38:01,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741843_1019 (size=73224) 2024-11-16T11:38:01,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741843_1019 (size=73224) 2024-11-16T11:38:01,035 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/04f0aca1348143449c818a04ba5ff9c5 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/04f0aca1348143449c818a04ba5ff9c5 2024-11-16T11:38:01,042 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ffee239c077be3582f21707b18f768b4/info of ffee239c077be3582f21707b18f768b4 into 04f0aca1348143449c818a04ba5ff9c5(size=71.5 K), total size for store is 71.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
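The ConstantSizeRegionSplitPolicy and StoreUtils DEBUG lines around this compaction reduce to two checks: the summed store size must exceed the check size (here sumSize=71.5 K vs sizeToCheck=16.0 K), and the midkey of the largest store file must differ from both its first and last row, otherwise no split point can be chosen ("midkey is the same as first or last row"). The sketch below is not HBase's implementation, only a minimal stand-in for those two checks; the class and method names are invented, and the last-row value is just the last row key visible in this excerpt.

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

// Simplified stand-in for the two split checks logged above; names and the
// last-row value are illustrative, only the sizes come from this log.
public class SplitCheckSketch {

    // "Should split because region size is big enough sumSize=..., sizeToCheck=..."
    static boolean sizeBigEnough(long sumStoreSizeBytes, long sizeToCheckBytes) {
        return sumStoreSizeBytes > sizeToCheckBytes;
    }

    // "cannot split ... because midkey is the same as first or last row"
    static boolean midkeyUsable(byte[] midKey, byte[] firstRow, byte[] lastRow) {
        return midKey != null
            && !Arrays.equals(midKey, firstRow)
            && !Arrays.equals(midKey, lastRow);
    }

    public static void main(String[] args) {
        long sumSize = 73_224L;     // the compacted store file above, ~71.5 K
        long sizeToCheck = 16_384L; // the 16.0 K check size in the DEBUG lines

        byte[] first = "row0001".getBytes(StandardCharsets.UTF_8);
        byte[] last  = "row0064".getBytes(StandardCharsets.UTF_8); // last row key visible in this excerpt
        byte[] mid   = "row0062".getBytes(StandardCharsets.UTF_8); // matches splitKey in the split request

        System.out.println("size check passes:  " + sizeBigEnough(sumSize, sizeToCheck)); // true
        System.out.println("usable split point: " + midkeyUsable(mid, first, last));      // true
    }
}
```

With the ~71.5 K store produced by this compaction, both checks pass, which lines up with the split request carrying splitKey=row0062 that follows.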
2024-11-16T11:38:01,042 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ffee239c077be3582f21707b18f768b4: 2024-11-16T11:38:01,042 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4., storeName=ffee239c077be3582f21707b18f768b4/info, priority=13, startTime=1731757081006; duration=0sec 2024-11-16T11:38:01,042 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-16T11:38:01,042 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:38:01,042 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-16T11:38:01,042 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:38:01,042 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-16T11:38:01,042 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T11:38:01,043 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:01,043 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:01,043 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ffee239c077be3582f21707b18f768b4:info 2024-11-16T11:38:01,044 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44903 {}] assignment.AssignmentManager(1355): Split request from a7948fca2832,45505,1731757067235, parent={ENCODED => ffee239c077be3582f21707b18f768b4, NAME => 'TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-16T11:38:01,050 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44903 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=a7948fca2832,45505,1731757067235 2024-11-16T11:38:01,054 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44903 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ffee239c077be3582f21707b18f768b4, daughterA=54197fbe674671b991ac1fbcec8165bd, daughterB=dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,055 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ffee239c077be3582f21707b18f768b4, 
daughterA=54197fbe674671b991ac1fbcec8165bd, daughterB=dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,055 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ffee239c077be3582f21707b18f768b4, daughterA=54197fbe674671b991ac1fbcec8165bd, daughterB=dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,055 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ffee239c077be3582f21707b18f768b4, daughterA=54197fbe674671b991ac1fbcec8165bd, daughterB=dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,061 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ffee239c077be3582f21707b18f768b4, UNASSIGN}] 2024-11-16T11:38:01,062 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ffee239c077be3582f21707b18f768b4, UNASSIGN 2024-11-16T11:38:01,063 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=ffee239c077be3582f21707b18f768b4, regionState=CLOSING, regionLocation=a7948fca2832,45505,1731757067235 2024-11-16T11:38:01,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ffee239c077be3582f21707b18f768b4, UNASSIGN because future has completed 2024-11-16T11:38:01,066 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-16T11:38:01,066 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure ffee239c077be3582f21707b18f768b4, server=a7948fca2832,45505,1731757067235}] 2024-11-16T11:38:01,227 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close ffee239c077be3582f21707b18f768b4 2024-11-16T11:38:01,227 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-16T11:38:01,228 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing ffee239c077be3582f21707b18f768b4, disabling compactions & flushes 2024-11-16T11:38:01,228 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 2024-11-16T11:38:01,228 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 
2024-11-16T11:38:01,229 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. after waiting 0 ms 2024-11-16T11:38:01,229 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 2024-11-16T11:38:01,229 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing ffee239c077be3582f21707b18f768b4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T11:38:01,236 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/58d714e256e34f1bb4ed1f4c39fa27d0 is 1080, key is row0064/info:/1731757080575/Put/seqid=0 2024-11-16T11:38:01,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741844_1020 (size=6033) 2024-11-16T11:38:01,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741844_1020 (size=6033) 2024-11-16T11:38:01,242 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/58d714e256e34f1bb4ed1f4c39fa27d0 2024-11-16T11:38:01,249 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/.tmp/info/58d714e256e34f1bb4ed1f4c39fa27d0 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/58d714e256e34f1bb4ed1f4c39fa27d0 2024-11-16T11:38:01,255 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/58d714e256e34f1bb4ed1f4c39fa27d0, entries=1, sequenceid=85, filesize=5.9 K 2024-11-16T11:38:01,256 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ffee239c077be3582f21707b18f768b4 in 27ms, sequenceid=85, compaction requested=false 2024-11-16T11:38:01,258 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/a326a7400843409fa5b64bea509629f8, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91076785a1574190b73c41fd0dd7c34e, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/82252b7cc8124e098013f6399ce0e129, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/7f06b8fced6c4c5cbafbca7d60c8c143, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91f456cf1ec54ae091fba81ca465fd45, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/5e18b816cda94ca2a4e8f07c2f8a8dc1] to archive 2024-11-16T11:38:01,259 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T11:38:01,261 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/a326a7400843409fa5b64bea509629f8 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/a326a7400843409fa5b64bea509629f8 2024-11-16T11:38:01,262 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91076785a1574190b73c41fd0dd7c34e to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91076785a1574190b73c41fd0dd7c34e 2024-11-16T11:38:01,263 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/82252b7cc8124e098013f6399ce0e129 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/82252b7cc8124e098013f6399ce0e129 2024-11-16T11:38:01,265 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/7f06b8fced6c4c5cbafbca7d60c8c143 to 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/7f06b8fced6c4c5cbafbca7d60c8c143 2024-11-16T11:38:01,266 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91f456cf1ec54ae091fba81ca465fd45 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/91f456cf1ec54ae091fba81ca465fd45 2024-11-16T11:38:01,266 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/5e18b816cda94ca2a4e8f07c2f8a8dc1 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/5e18b816cda94ca2a4e8f07c2f8a8dc1 2024-11-16T11:38:01,272 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=1 2024-11-16T11:38:01,272 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 2024-11-16T11:38:01,272 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for ffee239c077be3582f21707b18f768b4: Waiting for close lock at 1731757081228Running coprocessor pre-close hooks at 1731757081228Disabling compacts and flushes for region at 1731757081228Disabling writes for close at 1731757081229 (+1 ms)Obtaining lock to block concurrent updates at 1731757081229Preparing flush snapshotting stores in ffee239c077be3582f21707b18f768b4 at 1731757081229Finished memstore snapshotting TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731757081230 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 
at 1731757081231 (+1 ms)Flushing ffee239c077be3582f21707b18f768b4/info: creating writer at 1731757081232 (+1 ms)Flushing ffee239c077be3582f21707b18f768b4/info: appending metadata at 1731757081235 (+3 ms)Flushing ffee239c077be3582f21707b18f768b4/info: closing flushed file at 1731757081235Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@dd82ebf: reopening flushed file at 1731757081248 (+13 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ffee239c077be3582f21707b18f768b4 in 27ms, sequenceid=85, compaction requested=false at 1731757081256 (+8 ms)Writing region close event to WAL at 1731757081268 (+12 ms)Running coprocessor post-close hooks at 1731757081272 (+4 ms)Closed at 1731757081272 2024-11-16T11:38:01,274 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed ffee239c077be3582f21707b18f768b4 2024-11-16T11:38:01,275 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=ffee239c077be3582f21707b18f768b4, regionState=CLOSED 2024-11-16T11:38:01,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure ffee239c077be3582f21707b18f768b4, server=a7948fca2832,45505,1731757067235 because future has completed 2024-11-16T11:38:01,279 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-16T11:38:01,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure ffee239c077be3582f21707b18f768b4, server=a7948fca2832,45505,1731757067235 in 211 msec 2024-11-16T11:38:01,282 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-16T11:38:01,282 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ffee239c077be3582f21707b18f768b4, UNASSIGN in 219 msec 2024-11-16T11:38:01,291 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:01,294 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=ffee239c077be3582f21707b18f768b4, threads=2 2024-11-16T11:38:01,295 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/58d714e256e34f1bb4ed1f4c39fa27d0 for region: ffee239c077be3582f21707b18f768b4 2024-11-16T11:38:01,295 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/04f0aca1348143449c818a04ba5ff9c5 for region: ffee239c077be3582f21707b18f768b4 2024-11-16T11:38:01,304 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/58d714e256e34f1bb4ed1f4c39fa27d0, top=true 2024-11-16T11:38:01,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741845_1021 (size=27) 2024-11-16T11:38:01,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741845_1021 (size=27) 2024-11-16T11:38:01,311 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/TestLogRolling-testLogRolling=ffee239c077be3582f21707b18f768b4-58d714e256e34f1bb4ed1f4c39fa27d0 for child: dfc50fd7bc83de00085325d9d55f4778, parent: ffee239c077be3582f21707b18f768b4 2024-11-16T11:38:01,312 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/58d714e256e34f1bb4ed1f4c39fa27d0 for region: ffee239c077be3582f21707b18f768b4 2024-11-16T11:38:01,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741846_1022 (size=27) 2024-11-16T11:38:01,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741846_1022 (size=27) 2024-11-16T11:38:01,320 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/04f0aca1348143449c818a04ba5ff9c5 for region: ffee239c077be3582f21707b18f768b4 2024-11-16T11:38:01,322 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region ffee239c077be3582f21707b18f768b4 Daughter A: [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/54197fbe674671b991ac1fbcec8165bd/info/04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4] storefiles, Daughter B: [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/TestLogRolling-testLogRolling=ffee239c077be3582f21707b18f768b4-58d714e256e34f1bb4ed1f4c39fa27d0] storefiles. 
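The two daughter store-file lists just above show the two mechanisms the split uses: 04f0aca1348143449c818a04ba5ff9c5, which straddles the split key, gets a reference in each daughter named <parentFileName>.<parentEncodedRegionName>, while 58d714e256e34f1bb4ed1f4c39fa27d0, which lies entirely above the split key (top=true), is exposed to daughter B through an HFileLink named <table>=<parentEncodedRegionName>-<parentFileName>. The helper below only reconstructs those two names from values in this log; it is a naming sketch, not the HBase file-creation code.

```java
// Rebuilds the two daughter store-file names from the split log above using their
// parts; purely a naming illustration, not the HBase file-creation code.
public class SplitFileNameSketch {

    // Reference file kept in a daughter for a parent hfile that straddles the split key.
    static String referenceName(String parentHFile, String parentEncodedRegion) {
        return parentHFile + "." + parentEncodedRegion;
    }

    // HFileLink used when the parent hfile lies entirely inside one daughter (top=true above).
    static String hfileLinkName(String table, String parentEncodedRegion, String parentHFile) {
        return table + "=" + parentEncodedRegion + "-" + parentHFile;
    }

    public static void main(String[] args) {
        String parentRegion = "ffee239c077be3582f21707b18f768b4";
        String table = "TestLogRolling-testLogRolling";

        // 04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4 (present in both daughters)
        System.out.println(referenceName("04f0aca1348143449c818a04ba5ff9c5", parentRegion));

        // TestLogRolling-testLogRolling=ffee239c077be3582f21707b18f768b4-58d714e256e34f1bb4ed1f4c39fa27d0 (daughter B only)
        System.out.println(hfileLinkName(table, parentRegion, "58d714e256e34f1bb4ed1f4c39fa27d0"));
    }
}
```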
2024-11-16T11:38:01,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741847_1023 (size=71) 2024-11-16T11:38:01,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741847_1023 (size=71) 2024-11-16T11:38:01,331 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:01,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741848_1024 (size=71) 2024-11-16T11:38:01,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741848_1024 (size=71) 2024-11-16T11:38:01,343 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:01,351 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/54197fbe674671b991ac1fbcec8165bd/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-16T11:38:01,353 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-16T11:38:01,355 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731757081355"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731757081355"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731757081355"}]},"ts":"1731757081355"} 2024-11-16T11:38:01,355 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731757081355"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731757081355"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731757081355"}]},"ts":"1731757081355"} 2024-11-16T11:38:01,355 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731757081355"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731757081355"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731757081355"}]},"ts":"1731757081355"} 2024-11-16T11:38:01,371 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=54197fbe674671b991ac1fbcec8165bd, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=dfc50fd7bc83de00085325d9d55f4778, ASSIGN}] 2024-11-16T11:38:01,372 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=54197fbe674671b991ac1fbcec8165bd, ASSIGN 2024-11-16T11:38:01,372 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=dfc50fd7bc83de00085325d9d55f4778, ASSIGN 2024-11-16T11:38:01,373 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=dfc50fd7bc83de00085325d9d55f4778, ASSIGN; state=SPLITTING_NEW, location=a7948fca2832,45505,1731757067235; forceNewPlan=false, retain=false 2024-11-16T11:38:01,373 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=54197fbe674671b991ac1fbcec8165bd, ASSIGN; state=SPLITTING_NEW, location=a7948fca2832,45505,1731757067235; forceNewPlan=false, retain=false 2024-11-16T11:38:01,523 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=dfc50fd7bc83de00085325d9d55f4778, regionState=OPENING, regionLocation=a7948fca2832,45505,1731757067235 2024-11-16T11:38:01,523 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=54197fbe674671b991ac1fbcec8165bd, regionState=OPENING, regionLocation=a7948fca2832,45505,1731757067235 2024-11-16T11:38:01,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=54197fbe674671b991ac1fbcec8165bd, ASSIGN because future has completed 2024-11-16T11:38:01,527 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 54197fbe674671b991ac1fbcec8165bd, server=a7948fca2832,45505,1731757067235}] 2024-11-16T11:38:01,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=dfc50fd7bc83de00085325d9d55f4778, ASSIGN because future has completed 2024-11-16T11:38:01,528 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure dfc50fd7bc83de00085325d9d55f4778, server=a7948fca2832,45505,1731757067235}] 2024-11-16T11:38:01,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:01,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:01,682 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd. 2024-11-16T11:38:01,683 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 54197fbe674671b991ac1fbcec8165bd, NAME => 'TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-16T11:38:01,683 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 54197fbe674671b991ac1fbcec8165bd 2024-11-16T11:38:01,683 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:38:01,683 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 54197fbe674671b991ac1fbcec8165bd 2024-11-16T11:38:01,683 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 54197fbe674671b991ac1fbcec8165bd 2024-11-16T11:38:01,684 INFO [StoreOpener-54197fbe674671b991ac1fbcec8165bd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 54197fbe674671b991ac1fbcec8165bd 2024-11-16T11:38:01,685 INFO [StoreOpener-54197fbe674671b991ac1fbcec8165bd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 54197fbe674671b991ac1fbcec8165bd columnFamilyName info 2024-11-16T11:38:01,685 DEBUG [StoreOpener-54197fbe674671b991ac1fbcec8165bd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:01,697 DEBUG [StoreOpener-54197fbe674671b991ac1fbcec8165bd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/54197fbe674671b991ac1fbcec8165bd/info/04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4->hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/04f0aca1348143449c818a04ba5ff9c5-bottom 2024-11-16T11:38:01,698 INFO [StoreOpener-54197fbe674671b991ac1fbcec8165bd-1 {}] regionserver.HStore(327): Store=54197fbe674671b991ac1fbcec8165bd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:38:01,698 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 54197fbe674671b991ac1fbcec8165bd 2024-11-16T11:38:01,699 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/54197fbe674671b991ac1fbcec8165bd 2024-11-16T11:38:01,700 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/54197fbe674671b991ac1fbcec8165bd 2024-11-16T11:38:01,700 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 54197fbe674671b991ac1fbcec8165bd 2024-11-16T11:38:01,700 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 54197fbe674671b991ac1fbcec8165bd 2024-11-16T11:38:01,702 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 54197fbe674671b991ac1fbcec8165bd 2024-11-16T11:38:01,703 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 54197fbe674671b991ac1fbcec8165bd; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=791311, jitterRate=0.0062049925327301025}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T11:38:01,703 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 54197fbe674671b991ac1fbcec8165bd 2024-11-16T11:38:01,703 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 54197fbe674671b991ac1fbcec8165bd: Running coprocessor pre-open hook at 1731757081683Writing region info on filesystem at 1731757081683Initializing all the Stores at 1731757081684 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING 
=> 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757081684Cleaning up temporary data from old regions at 1731757081700 (+16 ms)Running coprocessor post-open hooks at 1731757081703 (+3 ms)Region opened successfully at 1731757081703 2024-11-16T11:38:01,704 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd., pid=12, masterSystemTime=1731757081679 2024-11-16T11:38:01,704 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 54197fbe674671b991ac1fbcec8165bd:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T11:38:01,704 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:01,704 DEBUG [RS:0;a7948fca2832:45505-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-16T11:38:01,705 INFO [RS:0;a7948fca2832:45505-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd. 2024-11-16T11:38:01,705 DEBUG [RS:0;a7948fca2832:45505-longCompactions-0 {}] regionserver.HStore(1541): 54197fbe674671b991ac1fbcec8165bd/info is initiating minor compaction (all files) 2024-11-16T11:38:01,705 INFO [RS:0;a7948fca2832:45505-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 54197fbe674671b991ac1fbcec8165bd/info in TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd. 2024-11-16T11:38:01,705 INFO [RS:0;a7948fca2832:45505-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/54197fbe674671b991ac1fbcec8165bd/info/04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4->hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/04f0aca1348143449c818a04ba5ff9c5-bottom] into tmpdir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/54197fbe674671b991ac1fbcec8165bd/.tmp, totalSize=71.5 K 2024-11-16T11:38:01,706 DEBUG [RS:0;a7948fca2832:45505-longCompactions-0 {}] compactions.Compactor(225): Compacting 04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731757078439 2024-11-16T11:38:01,707 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd. 
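(Illustrative aside, not part of this test run: the entries above show the first daughter region 54197fbe674671b991ac1fbcec8165bd finishing its open and post-open deploy. The resulting region layout can be inspected from a client with the public RegionLocator API; only the table name below is taken from the log, the connection settings are assumed local defaults.)

// Hedged sketch: list region boundaries and locations for the table named in this log.
// Connection details come from local hbase-site.xml/defaults; nothing here is from the test itself.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ListRegions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // After the split, two daughters are expected: one ending at 'row0062', one starting there.
        System.out.println(loc.getRegion().getEncodedName()
            + " [" + Bytes.toStringBinary(loc.getRegion().getStartKey())
            + ", " + Bytes.toStringBinary(loc.getRegion().getEndKey()) + ") on "
            + loc.getServerName());
      }
    }
  }
}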
2024-11-16T11:38:01,707 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd. 2024-11-16T11:38:01,707 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 2024-11-16T11:38:01,707 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => dfc50fd7bc83de00085325d9d55f4778, NAME => 'TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-16T11:38:01,707 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=54197fbe674671b991ac1fbcec8165bd, regionState=OPEN, openSeqNum=89, regionLocation=a7948fca2832,45505,1731757067235 2024-11-16T11:38:01,707 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,707 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:38:01,707 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,707 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,709 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-16T11:38:01,709 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
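(Illustrative aside: the flush of hbase:meta (region 1588230740) logged here was triggered internally by the region server's MemStoreFlusher once FlushAllLargeStoresPolicy decided to flush all column families. A client can request the equivalent explicitly with Admin.flush(); this is only a sketch of that client-side call, with the table name taken from the log and everything else assumed.)

// Hedged sketch: client-requested flush, mirroring what MemStoreFlusher does on its own above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks every region of the table to write its current memstore out as new HFiles.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRolling"));
    }
  }
}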
2024-11-16T11:38:01,709 INFO [StoreOpener-dfc50fd7bc83de00085325d9d55f4778-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-16T11:38:01,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 54197fbe674671b991ac1fbcec8165bd, server=a7948fca2832,45505,1731757067235 because future has completed 2024-11-16T11:38:01,710 INFO [StoreOpener-dfc50fd7bc83de00085325d9d55f4778-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dfc50fd7bc83de00085325d9d55f4778 columnFamilyName info 2024-11-16T11:38:01,710 DEBUG [StoreOpener-dfc50fd7bc83de00085325d9d55f4778-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:01,713 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-16T11:38:01,713 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 54197fbe674671b991ac1fbcec8165bd, server=a7948fca2832,45505,1731757067235 in 185 msec 2024-11-16T11:38:01,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=54197fbe674671b991ac1fbcec8165bd, ASSIGN in 342 msec 2024-11-16T11:38:01,722 DEBUG [StoreOpener-dfc50fd7bc83de00085325d9d55f4778-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4->hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/04f0aca1348143449c818a04ba5ff9c5-top 2024-11-16T11:38:01,726 INFO [RS:0;a7948fca2832:45505-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54197fbe674671b991ac1fbcec8165bd#info#compaction#65 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:38:01,727 DEBUG [RS:0;a7948fca2832:45505-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/54197fbe674671b991ac1fbcec8165bd/.tmp/info/9c5584209e044248afe6ab4e0b7441dc is 1080, key is row0001/info:/1731757078439/Put/seqid=0 2024-11-16T11:38:01,727 DEBUG [StoreOpener-dfc50fd7bc83de00085325d9d55f4778-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/TestLogRolling-testLogRolling=ffee239c077be3582f21707b18f768b4-58d714e256e34f1bb4ed1f4c39fa27d0 2024-11-16T11:38:01,727 INFO [StoreOpener-dfc50fd7bc83de00085325d9d55f4778-1 {}] regionserver.HStore(327): Store=dfc50fd7bc83de00085325d9d55f4778/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:38:01,727 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,728 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,729 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,729 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,729 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/.tmp/info/e61ce7f7e5f343759980735c4aae0965 is 193, key is TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778./info:regioninfo/1731757081523/Put/seqid=0 2024-11-16T11:38:01,731 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,732 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened dfc50fd7bc83de00085325d9d55f4778; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821402, jitterRate=0.0444675087928772}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T11:38:01,733 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.HRegion(1122): Running coprocessor post-open hooks for dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:01,733 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for dfc50fd7bc83de00085325d9d55f4778: Running coprocessor pre-open hook at 1731757081707Writing region info on filesystem at 1731757081707Initializing all the Stores at 1731757081709 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757081709Cleaning up temporary data from old regions at 1731757081729 (+20 ms)Running coprocessor post-open hooks at 1731757081733 (+4 ms)Region opened successfully at 1731757081733 2024-11-16T11:38:01,734 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., pid=13, masterSystemTime=1731757081679 2024-11-16T11:38:01,734 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store dfc50fd7bc83de00085325d9d55f4778:info, priority=-2147483648, current under compaction store size is 2 2024-11-16T11:38:01,734 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-16T11:38:01,734 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:01,734 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 2024-11-16T11:38:01,734 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1541): dfc50fd7bc83de00085325d9d55f4778/info is initiating minor compaction (all files) 2024-11-16T11:38:01,735 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dfc50fd7bc83de00085325d9d55f4778/info in TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 
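(Illustrative aside: the CompactionConfiguration(183) line at 11:38:01,710 dumps the selection thresholds the store opener picked up, e.g. minFilesToCompact, maxFilesToCompact, ratio, major period and jitter. Those thresholds correspond to standard HBase configuration keys; the sketch below only shows the mapping, and the values are illustrative rather than the ones this mini-cluster actually used.)

// Hedged sketch: the knobs echoed in the CompactionConfiguration(183) line map onto these
// standard HBase configuration keys. Values are illustrative only, not taken from this test.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                            // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                           // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                     // selection ratio
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);     // minCompactSize
    conf.setLong("hbase.hregion.majorcompaction", 7L * 24 * 60 * 60 * 1000);  // major period (ms)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);              // major jitter
    return conf;
  }
}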
2024-11-16T11:38:01,735 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4->hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/04f0aca1348143449c818a04ba5ff9c5-top, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/TestLogRolling-testLogRolling=ffee239c077be3582f21707b18f768b4-58d714e256e34f1bb4ed1f4c39fa27d0] into tmpdir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp, totalSize=77.4 K 2024-11-16T11:38:01,735 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1731757078439 2024-11-16T11:38:01,735 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=ffee239c077be3582f21707b18f768b4-58d714e256e34f1bb4ed1f4c39fa27d0, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731757080575 2024-11-16T11:38:01,736 DEBUG [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 2024-11-16T11:38:01,736 INFO [RS_OPEN_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 
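(Illustrative aside: the two daughters meet at the split point 'row0062', which is the ENDKEY of 54197fbe674671b991ac1fbcec8165bd and the STARTKEY of dfc50fd7bc83de00085325d9d55f4778. The sketch below shows writes that would land on either side of that boundary; the rowkeys, qualifier and value are invented for illustration, while the table and column family names come from the log.)

// Hedged sketch: rows below/above the split point 'row0062' land in different daughter regions.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteAcrossSplit {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      byte[] info = Bytes.toBytes("info");
      // Below 'row0062': served by the daughter with ENDKEY 'row0062' (54197fbe...).
      table.put(new Put(Bytes.toBytes("row0001")).addColumn(info, Bytes.toBytes("q"), Bytes.toBytes("v")));
      // At or above 'row0062': served by the daughter with STARTKEY 'row0062' (dfc50fd7...).
      table.put(new Put(Bytes.toBytes("row0100")).addColumn(info, Bytes.toBytes("q"), Bytes.toBytes("v")));
    }
  }
}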
2024-11-16T11:38:01,737 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=dfc50fd7bc83de00085325d9d55f4778, regionState=OPEN, openSeqNum=89, regionLocation=a7948fca2832,45505,1731757067235 2024-11-16T11:38:01,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741849_1025 (size=70862) 2024-11-16T11:38:01,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741849_1025 (size=70862) 2024-11-16T11:38:01,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure dfc50fd7bc83de00085325d9d55f4778, server=a7948fca2832,45505,1731757067235 because future has completed 2024-11-16T11:38:01,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741850_1026 (size=9847) 2024-11-16T11:38:01,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741850_1026 (size=9847) 2024-11-16T11:38:01,745 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-16T11:38:01,745 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure dfc50fd7bc83de00085325d9d55f4778, server=a7948fca2832,45505,1731757067235 in 214 msec 2024-11-16T11:38:01,746 DEBUG [RS:0;a7948fca2832:45505-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/54197fbe674671b991ac1fbcec8165bd/.tmp/info/9c5584209e044248afe6ab4e0b7441dc as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/54197fbe674671b991ac1fbcec8165bd/info/9c5584209e044248afe6ab4e0b7441dc 2024-11-16T11:38:01,748 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-16T11:38:01,748 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=dfc50fd7bc83de00085325d9d55f4778, ASSIGN in 374 msec 2024-11-16T11:38:01,751 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=ffee239c077be3582f21707b18f768b4, daughterA=54197fbe674671b991ac1fbcec8165bd, daughterB=dfc50fd7bc83de00085325d9d55f4778 in 698 msec 2024-11-16T11:38:01,753 INFO [RS:0;a7948fca2832:45505-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 54197fbe674671b991ac1fbcec8165bd/info of 54197fbe674671b991ac1fbcec8165bd into 9c5584209e044248afe6ab4e0b7441dc(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
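(Illustrative aside: the entry above records the SplitTableRegionProcedure for parent ffee239c077be3582f21707b18f768b4 finishing, with daughterA 54197fbe674671b991ac1fbcec8165bd and daughterB dfc50fd7bc83de00085325d9d55f4778, in 698 msec. The sketch below shows how a split like this can be requested from the client side; whether this test called split() explicitly or relied on the SteppingSplitPolicy is not shown in the log, and the split point 'row0062' is simply taken from the daughter boundaries.)

// Hedged sketch: requesting a region split from the client. The master then runs a
// SplitTableRegionProcedure like the one completing above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asynchronously asks the master to split the region containing 'row0062' at that key.
      admin.split(TableName.valueOf("TestLogRolling-testLogRolling"), Bytes.toBytes("row0062"));
    }
  }
}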
2024-11-16T11:38:01,753 DEBUG [RS:0;a7948fca2832:45505-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 54197fbe674671b991ac1fbcec8165bd: 2024-11-16T11:38:01,753 INFO [RS:0;a7948fca2832:45505-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd., storeName=54197fbe674671b991ac1fbcec8165bd/info, priority=15, startTime=1731757081704; duration=0sec 2024-11-16T11:38:01,753 DEBUG [RS:0;a7948fca2832:45505-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:01,753 DEBUG [RS:0;a7948fca2832:45505-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54197fbe674671b991ac1fbcec8165bd:info 2024-11-16T11:38:01,759 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dfc50fd7bc83de00085325d9d55f4778#info#compaction#67 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:38:01,759 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/29fdc84c0fd541a89b9b909ecd0f32cc is 1080, key is row0062/info:/1731757080570/Put/seqid=0 2024-11-16T11:38:01,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741851_1027 (size=8359) 2024-11-16T11:38:01,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741851_1027 (size=8359) 2024-11-16T11:38:01,769 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/29fdc84c0fd541a89b9b909ecd0f32cc as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/29fdc84c0fd541a89b9b909ecd0f32cc 2024-11-16T11:38:01,775 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in dfc50fd7bc83de00085325d9d55f4778/info of dfc50fd7bc83de00085325d9d55f4778 into 29fdc84c0fd541a89b9b909ecd0f32cc(size=8.2 K), total size for store is 8.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
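(Illustrative aside: a little further down, at 11:38:02,580, a client mutation hits NotServingRegionException because its cached location still points at the now-offline parent region ffee239c077be3582f21707b18f768b4, and AsyncRegionLocatorHelper refreshes the cache. The sketch below is an ordinary read issued after the split: the HBase client library performs that retry and cache refresh internally, so application code does not normally handle the exception itself. The rowkey is invented for illustration; table and family names come from the log.)

// Hedged sketch: a plain read after the split; stale-location retries happen inside the client.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadAfterSplit {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      Result r = table.get(new Get(Bytes.toBytes("row0001")));
      byte[] v = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("q"));
      System.out.println(v == null ? "no value" : Bytes.toString(v));
    }
  }
}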
2024-11-16T11:38:01,775 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:01,775 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., storeName=dfc50fd7bc83de00085325d9d55f4778/info, priority=14, startTime=1731757081734; duration=0sec 2024-11-16T11:38:01,775 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:01,775 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dfc50fd7bc83de00085325d9d55f4778:info 2024-11-16T11:38:02,145 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/.tmp/info/e61ce7f7e5f343759980735c4aae0965 2024-11-16T11:38:02,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/.tmp/ns/231b9f0601954580a74eba5eea36c766 is 43, key is default/ns:d/1731757068318/Put/seqid=0 2024-11-16T11:38:02,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741852_1028 (size=5153) 2024-11-16T11:38:02,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741852_1028 (size=5153) 2024-11-16T11:38:02,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/.tmp/ns/231b9f0601954580a74eba5eea36c766 2024-11-16T11:38:02,191 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/.tmp/table/2194224326d740e1bf0d47901da412a8 is 65, key is TestLogRolling-testLogRolling/table:state/1731757068742/Put/seqid=0 2024-11-16T11:38:02,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741853_1029 (size=5340) 2024-11-16T11:38:02,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741853_1029 (size=5340) 2024-11-16T11:38:02,196 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/.tmp/table/2194224326d740e1bf0d47901da412a8 2024-11-16T11:38:02,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/.tmp/info/e61ce7f7e5f343759980735c4aae0965 as 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/info/e61ce7f7e5f343759980735c4aae0965 2024-11-16T11:38:02,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/info/e61ce7f7e5f343759980735c4aae0965, entries=30, sequenceid=17, filesize=9.6 K 2024-11-16T11:38:02,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/.tmp/ns/231b9f0601954580a74eba5eea36c766 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/ns/231b9f0601954580a74eba5eea36c766 2024-11-16T11:38:02,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/ns/231b9f0601954580a74eba5eea36c766, entries=2, sequenceid=17, filesize=5.0 K 2024-11-16T11:38:02,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/.tmp/table/2194224326d740e1bf0d47901da412a8 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/table/2194224326d740e1bf0d47901da412a8 2024-11-16T11:38:02,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/table/2194224326d740e1bf0d47901da412a8, entries=2, sequenceid=17, filesize=5.2 K 2024-11-16T11:38:02,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 509ms, sequenceid=17, compaction requested=false 2024-11-16T11:38:02,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T11:38:02,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50670 deadline: 1731757092577, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. is not online on a7948fca2832,45505,1731757067235 2024-11-16T11:38:02,600 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4., hostname=a7948fca2832,45505,1731757067235, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4., hostname=a7948fca2832,45505,1731757067235, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. 
is not online on a7948fca2832,45505,1731757067235 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T11:38:02,600 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4., hostname=a7948fca2832,45505,1731757067235, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4. is not online on a7948fca2832,45505,1731757067235 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T11:38:02,601 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731757068375.ffee239c077be3582f21707b18f768b4., hostname=a7948fca2832,45505,1731757067235, seqNum=2 from cache 2024-11-16T11:38:02,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:02,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:38:03,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:03,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:04,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:38:04,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:05,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:05,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:38:06,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:06,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:06,774 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T11:38:06,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,778 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:06,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T11:38:07,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:07,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:08,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:08,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:09,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:09,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:10,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:10,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:11,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:11,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:12,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:12,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:12,704 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., hostname=a7948fca2832,45505,1731757067235, seqNum=89] 2024-11-16T11:38:12,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:12,717 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T11:38:12,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/260c9b9b962447f6af2a12d3ddaf6b95 is 1080, key is row0065/info:/1731757092705/Put/seqid=0 2024-11-16T11:38:12,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741854_1030 (size=12509) 2024-11-16T11:38:12,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741854_1030 (size=12509) 2024-11-16T11:38:12,727 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/260c9b9b962447f6af2a12d3ddaf6b95 2024-11-16T11:38:12,736 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/260c9b9b962447f6af2a12d3ddaf6b95 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/260c9b9b962447f6af2a12d3ddaf6b95 2024-11-16T11:38:12,743 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/260c9b9b962447f6af2a12d3ddaf6b95, entries=7, sequenceid=99, filesize=12.2 K 2024-11-16T11:38:12,747 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for dfc50fd7bc83de00085325d9d55f4778 in 30ms, sequenceid=99, compaction requested=false 2024-11-16T11:38:12,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:12,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:12,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T11:38:12,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/1ccd5242bbb348a69290767c6ad510ce is 1080, key is row0072/info:/1731757092718/Put/seqid=0 2024-11-16T11:38:12,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741855_1031 (size=17894) 2024-11-16T11:38:12,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741855_1031 (size=17894) 2024-11-16T11:38:12,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/1ccd5242bbb348a69290767c6ad510ce 2024-11-16T11:38:12,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/1ccd5242bbb348a69290767c6ad510ce as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/1ccd5242bbb348a69290767c6ad510ce 2024-11-16T11:38:12,789 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/1ccd5242bbb348a69290767c6ad510ce, entries=12, sequenceid=114, filesize=17.5 K 2024-11-16T11:38:12,790 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for dfc50fd7bc83de00085325d9d55f4778 in 38ms, sequenceid=114, compaction requested=true 2024-11-16T11:38:12,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:12,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dfc50fd7bc83de00085325d9d55f4778:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T11:38:12,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:12,790 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T11:38:12,791 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T11:38:12,791 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1541): dfc50fd7bc83de00085325d9d55f4778/info is initiating minor compaction (all files) 2024-11-16T11:38:12,791 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dfc50fd7bc83de00085325d9d55f4778/info in TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 2024-11-16T11:38:12,792 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/29fdc84c0fd541a89b9b909ecd0f32cc, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/260c9b9b962447f6af2a12d3ddaf6b95, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/1ccd5242bbb348a69290767c6ad510ce] into tmpdir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp, totalSize=37.9 K 2024-11-16T11:38:12,792 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 29fdc84c0fd541a89b9b909ecd0f32cc, keycount=3, bloomtype=ROW, size=8.2 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731757080570 2024-11-16T11:38:12,792 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 260c9b9b962447f6af2a12d3ddaf6b95, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1731757092705 2024-11-16T11:38:12,793 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1ccd5242bbb348a69290767c6ad510ce, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1731757092718 2024-11-16T11:38:12,806 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
dfc50fd7bc83de00085325d9d55f4778#info#compaction#72 average throughput is 22.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:38:12,806 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/54336d6cb7e94cb8a47fe25b9be85b8e is 1080, key is row0062/info:/1731757080570/Put/seqid=0 2024-11-16T11:38:12,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741856_1032 (size=28952) 2024-11-16T11:38:12,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741856_1032 (size=28952) 2024-11-16T11:38:12,817 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/54336d6cb7e94cb8a47fe25b9be85b8e as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/54336d6cb7e94cb8a47fe25b9be85b8e 2024-11-16T11:38:12,828 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dfc50fd7bc83de00085325d9d55f4778/info of dfc50fd7bc83de00085325d9d55f4778 into 54336d6cb7e94cb8a47fe25b9be85b8e(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T11:38:12,828 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:12,828 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., storeName=dfc50fd7bc83de00085325d9d55f4778/info, priority=13, startTime=1731757092790; duration=0sec 2024-11-16T11:38:12,828 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:12,828 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dfc50fd7bc83de00085325d9d55f4778:info 2024-11-16T11:38:13,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:13,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:14,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:14,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:14,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:14,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-16T11:38:14,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/09084539bab340f0a30411ba3589ee3b is 1080, key is row0084/info:/1731757092753/Put/seqid=0 2024-11-16T11:38:14,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741857_1033 (size=20066) 2024-11-16T11:38:14,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741857_1033 (size=20066) 2024-11-16T11:38:14,794 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/09084539bab340f0a30411ba3589ee3b 2024-11-16T11:38:14,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/09084539bab340f0a30411ba3589ee3b as 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/09084539bab340f0a30411ba3589ee3b 2024-11-16T11:38:14,807 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/09084539bab340f0a30411ba3589ee3b, entries=14, sequenceid=132, filesize=19.6 K 2024-11-16T11:38:14,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=10.51 KB/10760 for dfc50fd7bc83de00085325d9d55f4778 in 27ms, sequenceid=132, compaction requested=false 2024-11-16T11:38:14,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:14,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:14,809 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T11:38:14,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/fdca348422124873ba5eb0701c12dbe5 is 1080, key is row0098/info:/1731757094782/Put/seqid=0 2024-11-16T11:38:14,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741858_1034 (size=16828) 2024-11-16T11:38:14,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741858_1034 (size=16828) 2024-11-16T11:38:14,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=146 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/fdca348422124873ba5eb0701c12dbe5 2024-11-16T11:38:14,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/fdca348422124873ba5eb0701c12dbe5 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fdca348422124873ba5eb0701c12dbe5 2024-11-16T11:38:14,830 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fdca348422124873ba5eb0701c12dbe5, entries=11, sequenceid=146, filesize=16.4 K 2024-11-16T11:38:14,831 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for dfc50fd7bc83de00085325d9d55f4778 in 22ms, sequenceid=146, compaction requested=true 2024-11-16T11:38:14,831 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:14,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dfc50fd7bc83de00085325d9d55f4778:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T11:38:14,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:14,831 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T11:38:14,832 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 65846 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T11:38:14,833 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1541): dfc50fd7bc83de00085325d9d55f4778/info is initiating minor compaction (all files) 2024-11-16T11:38:14,833 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dfc50fd7bc83de00085325d9d55f4778/info in TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 2024-11-16T11:38:14,833 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/54336d6cb7e94cb8a47fe25b9be85b8e, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/09084539bab340f0a30411ba3589ee3b, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fdca348422124873ba5eb0701c12dbe5] into tmpdir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp, totalSize=64.3 K 2024-11-16T11:38:14,833 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 54336d6cb7e94cb8a47fe25b9be85b8e, keycount=22, bloomtype=ROW, size=28.3 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1731757080570 2024-11-16T11:38:14,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:14,834 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T11:38:14,834 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 09084539bab340f0a30411ba3589ee3b, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731757092753 2024-11-16T11:38:14,834 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting fdca348422124873ba5eb0701c12dbe5, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1731757094782 2024-11-16T11:38:14,851 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/fcacb20fa45d4fd29646cd216356d8a0 is 1080, key is row0109/info:/1731757094811/Put/seqid=0 2024-11-16T11:38:14,854 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dfc50fd7bc83de00085325d9d55f4778#info#compaction#76 average throughput is 24.11 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:38:14,855 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/a263c307cbfb4790b5c334fea32cf380 is 1080, key is row0062/info:/1731757080570/Put/seqid=0 2024-11-16T11:38:14,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741859_1035 (size=16828) 2024-11-16T11:38:14,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741859_1035 (size=16828) 2024-11-16T11:38:14,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/fcacb20fa45d4fd29646cd216356d8a0 2024-11-16T11:38:14,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741860_1036 (size=56032) 2024-11-16T11:38:14,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741860_1036 (size=56032) 2024-11-16T11:38:14,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/fcacb20fa45d4fd29646cd216356d8a0 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fcacb20fa45d4fd29646cd216356d8a0 2024-11-16T11:38:14,882 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fcacb20fa45d4fd29646cd216356d8a0, entries=11, sequenceid=160, filesize=16.4 K 2024-11-16T11:38:14,883 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for dfc50fd7bc83de00085325d9d55f4778 in 50ms, sequenceid=160, compaction requested=false 2024-11-16T11:38:14,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:15,282 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/a263c307cbfb4790b5c334fea32cf380 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/a263c307cbfb4790b5c334fea32cf380 2024-11-16T11:38:15,290 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dfc50fd7bc83de00085325d9d55f4778/info of dfc50fd7bc83de00085325d9d55f4778 into a263c307cbfb4790b5c334fea32cf380(size=54.7 K), total size for store is 71.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T11:38:15,290 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:15,290 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., storeName=dfc50fd7bc83de00085325d9d55f4778/info, priority=13, startTime=1731757094831; duration=0sec 2024-11-16T11:38:15,290 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:15,291 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dfc50fd7bc83de00085325d9d55f4778:info 2024-11-16T11:38:15,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:15,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:16,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:16,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:16,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:16,859 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-16T11:38:16,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/16c03c7d55d44340a5c8f9394b7135a5 is 1080, key is row0120/info:/1731757094835/Put/seqid=0 2024-11-16T11:38:16,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741861_1037 (size=15750) 2024-11-16T11:38:16,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741861_1037 (size=15750) 2024-11-16T11:38:16,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/16c03c7d55d44340a5c8f9394b7135a5 2024-11-16T11:38:16,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/16c03c7d55d44340a5c8f9394b7135a5 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/16c03c7d55d44340a5c8f9394b7135a5 2024-11-16T11:38:16,883 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/16c03c7d55d44340a5c8f9394b7135a5, entries=10, sequenceid=174, filesize=15.4 K 2024-11-16T11:38:16,884 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=10.51 KB/10760 for dfc50fd7bc83de00085325d9d55f4778 in 25ms, sequenceid=174, compaction requested=true 2024-11-16T11:38:16,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:16,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dfc50fd7bc83de00085325d9d55f4778:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T11:38:16,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:16,885 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T11:38:16,886 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 88610 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T11:38:16,886 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1541): dfc50fd7bc83de00085325d9d55f4778/info is initiating minor compaction (all files) 2024-11-16T11:38:16,886 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dfc50fd7bc83de00085325d9d55f4778/info in TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 2024-11-16T11:38:16,886 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/a263c307cbfb4790b5c334fea32cf380, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fcacb20fa45d4fd29646cd216356d8a0, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/16c03c7d55d44340a5c8f9394b7135a5] into tmpdir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp, totalSize=86.5 K 2024-11-16T11:38:16,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:16,886 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T11:38:16,886 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting a263c307cbfb4790b5c334fea32cf380, keycount=47, bloomtype=ROW, size=54.7 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1731757080570 2024-11-16T11:38:16,887 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting fcacb20fa45d4fd29646cd216356d8a0, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1731757094811 2024-11-16T11:38:16,887 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 16c03c7d55d44340a5c8f9394b7135a5, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731757094835 2024-11-16T11:38:16,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/23b07ead7bce418e94d3d8e3ce2a2322 is 1080, key is row0130/info:/1731757096860/Put/seqid=0 2024-11-16T11:38:16,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is 
added to blk_1073741862_1038 (size=17906) 2024-11-16T11:38:16,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741862_1038 (size=17906) 2024-11-16T11:38:16,902 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/23b07ead7bce418e94d3d8e3ce2a2322 2024-11-16T11:38:16,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/23b07ead7bce418e94d3d8e3ce2a2322 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/23b07ead7bce418e94d3d8e3ce2a2322 2024-11-16T11:38:16,909 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dfc50fd7bc83de00085325d9d55f4778#info#compaction#79 average throughput is 34.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:38:16,910 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/f3c3a0991c5b47369c276c9e91e218a9 is 1080, key is row0062/info:/1731757080570/Put/seqid=0 2024-11-16T11:38:16,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/23b07ead7bce418e94d3d8e3ce2a2322, entries=12, sequenceid=189, filesize=17.5 K 2024-11-16T11:38:16,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for dfc50fd7bc83de00085325d9d55f4778 in 29ms, sequenceid=189, compaction requested=false 2024-11-16T11:38:16,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:16,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741863_1039 (size=78909) 2024-11-16T11:38:16,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741863_1039 (size=78909) 2024-11-16T11:38:16,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:16,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T11:38:16,921 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/df8edb34fa0a4676909818f6af3f1c6b is 1080, key is row0142/info:/1731757096888/Put/seqid=0 2024-11-16T11:38:16,922 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/f3c3a0991c5b47369c276c9e91e218a9 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/f3c3a0991c5b47369c276c9e91e218a9 2024-11-16T11:38:16,928 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dfc50fd7bc83de00085325d9d55f4778/info of dfc50fd7bc83de00085325d9d55f4778 into f3c3a0991c5b47369c276c9e91e218a9(size=77.1 K), total size for store is 94.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T11:38:16,928 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:16,928 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., storeName=dfc50fd7bc83de00085325d9d55f4778/info, priority=13, startTime=1731757096885; duration=0sec 2024-11-16T11:38:16,928 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:16,928 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dfc50fd7bc83de00085325d9d55f4778:info 2024-11-16T11:38:16,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741864_1040 (size=17906) 2024-11-16T11:38:16,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741864_1040 (size=17906) 2024-11-16T11:38:16,930 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/df8edb34fa0a4676909818f6af3f1c6b 2024-11-16T11:38:16,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/df8edb34fa0a4676909818f6af3f1c6b as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/df8edb34fa0a4676909818f6af3f1c6b 2024-11-16T11:38:16,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/df8edb34fa0a4676909818f6af3f1c6b, entries=12, sequenceid=204, filesize=17.5 K 2024-11-16T11:38:16,940 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for dfc50fd7bc83de00085325d9d55f4778 in 23ms, sequenceid=204, compaction requested=true 2024-11-16T11:38:16,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:16,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dfc50fd7bc83de00085325d9d55f4778:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T11:38:16,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:16,940 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T11:38:16,941 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 114721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T11:38:16,941 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1541): dfc50fd7bc83de00085325d9d55f4778/info is initiating minor compaction (all files) 2024-11-16T11:38:16,941 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dfc50fd7bc83de00085325d9d55f4778/info in TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 
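The compaction decisions logged around here (SortedCompactionPolicy selecting from 3 store files, ExploringCompactionPolicy reporting "3 files ... with 1 in ratio") boil down to a size-ratio test over candidate sets of store files. The Java sketch below is a simplified, self-contained illustration of such a ratio check only; it is not the actual HBase ExploringCompactionPolicy, the constants are assumed defaults, and the real policy applies extra rules (off-peak ratios, file-count limits, stuck-store handling), so its choices in this log need not match the sketch.

import java.util.List;

/**
 * Simplified sketch of a size-ratio check like the one behind the
 * "Exploring compaction algorithm has selected N files ..." lines above.
 * NOT the HBase ExploringCompactionPolicy; constants and the exact rule
 * are illustrative assumptions.
 */
public class RatioSelectionSketch {

  private static final double RATIO = 1.2;  // assumed ratio, mirroring the usual default compaction ratio
  private static final int MIN_FILES = 3;   // assumed minimum number of files per compaction

  /**
   * A candidate set of store-file sizes counts as "in ratio" when no single
   * file is more than RATIO times the combined size of the remaining files.
   */
  static boolean filesInRatio(List<Long> fileSizes) {
    if (fileSizes.size() < MIN_FILES) {
      return false;
    }
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * RATIO) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Purely illustrative sizes in bytes, not the store files from this log.
    System.out.println(filesInRatio(List.of(30_000L, 25_000L, 20_000L)));   // true: sizes are close together
    System.out.println(filesInRatio(List.of(200_000L, 10_000L, 10_000L)));  // false: one file dominates
  }
}
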
2024-11-16T11:38:16,942 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/f3c3a0991c5b47369c276c9e91e218a9, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/23b07ead7bce418e94d3d8e3ce2a2322, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/df8edb34fa0a4676909818f6af3f1c6b] into tmpdir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp, totalSize=112.0 K 2024-11-16T11:38:16,942 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting f3c3a0991c5b47369c276c9e91e218a9, keycount=68, bloomtype=ROW, size=77.1 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731757080570 2024-11-16T11:38:16,942 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 23b07ead7bce418e94d3d8e3ce2a2322, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731757096860 2024-11-16T11:38:16,943 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting df8edb34fa0a4676909818f6af3f1c6b, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1731757096888 2024-11-16T11:38:16,953 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dfc50fd7bc83de00085325d9d55f4778#info#compaction#81 average throughput is 47.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:38:16,954 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/2882f464febd479c946236e0b4059443 is 1080, key is row0062/info:/1731757080570/Put/seqid=0 2024-11-16T11:38:16,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741865_1041 (size=104891) 2024-11-16T11:38:16,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741865_1041 (size=104891) 2024-11-16T11:38:16,965 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/2882f464febd479c946236e0b4059443 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/2882f464febd479c946236e0b4059443 2024-11-16T11:38:16,971 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dfc50fd7bc83de00085325d9d55f4778/info of dfc50fd7bc83de00085325d9d55f4778 into 2882f464febd479c946236e0b4059443(size=102.4 K), total size for store is 102.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T11:38:16,971 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:16,971 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., storeName=dfc50fd7bc83de00085325d9d55f4778/info, priority=13, startTime=1731757096940; duration=0sec 2024-11-16T11:38:16,971 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:16,971 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dfc50fd7bc83de00085325d9d55f4778:info 2024-11-16T11:38:17,037 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T11:38:17,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:17,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:18,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:18,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:18,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:18,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-16T11:38:18,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/5dbc3a1885fa49ff96d00a7895a93f9e is 1080, key is row0154/info:/1731757096918/Put/seqid=0 2024-11-16T11:38:18,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741866_1042 (size=13594) 2024-11-16T11:38:18,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741866_1042 (size=13594) 2024-11-16T11:38:18,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/5dbc3a1885fa49ff96d00a7895a93f9e 2024-11-16T11:38:18,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/5dbc3a1885fa49ff96d00a7895a93f9e as 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/5dbc3a1885fa49ff96d00a7895a93f9e 2024-11-16T11:38:18,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/5dbc3a1885fa49ff96d00a7895a93f9e, entries=8, sequenceid=217, filesize=13.3 K 2024-11-16T11:38:18,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=10.51 KB/10760 for dfc50fd7bc83de00085325d9d55f4778 in 37ms, sequenceid=217, compaction requested=false 2024-11-16T11:38:18,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:18,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:18,973 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T11:38:18,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/3d8ae8fa551c4e48abfc5a6f02a62293 is 1080, key is row0162/info:/1731757098935/Put/seqid=0 2024-11-16T11:38:18,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741867_1043 (size=17906) 2024-11-16T11:38:18,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741867_1043 (size=17906) 2024-11-16T11:38:18,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/3d8ae8fa551c4e48abfc5a6f02a62293 2024-11-16T11:38:19,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/3d8ae8fa551c4e48abfc5a6f02a62293 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/3d8ae8fa551c4e48abfc5a6f02a62293 2024-11-16T11:38:19,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=dfc50fd7bc83de00085325d9d55f4778, server=a7948fca2832,45505,1731757067235 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-16T11:38:19,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50670 deadline: 1731757109004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=dfc50fd7bc83de00085325d9d55f4778, server=a7948fca2832,45505,1731757067235 2024-11-16T11:38:19,005 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., hostname=a7948fca2832,45505,1731757067235, seqNum=89 , the old value is region=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., hostname=a7948fca2832,45505,1731757067235, seqNum=89, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=dfc50fd7bc83de00085325d9d55f4778, server=a7948fca2832,45505,1731757067235 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T11:38:19,005 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., hostname=a7948fca2832,45505,1731757067235, seqNum=89 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=dfc50fd7bc83de00085325d9d55f4778, server=a7948fca2832,45505,1731757067235 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at 
org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T11:38:19,005 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., hostname=a7948fca2832,45505,1731757067235, seqNum=89 because the exception is null or not the one we care about 2024-11-16T11:38:19,005 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/3d8ae8fa551c4e48abfc5a6f02a62293, entries=12, sequenceid=232, filesize=17.5 K 2024-11-16T11:38:19,006 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for dfc50fd7bc83de00085325d9d55f4778 in 34ms, sequenceid=232, compaction requested=true 2024-11-16T11:38:19,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:19,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dfc50fd7bc83de00085325d9d55f4778:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T11:38:19,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:19,006 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T11:38:19,007 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136391 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T11:38:19,007 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1541): dfc50fd7bc83de00085325d9d55f4778/info is initiating minor compaction (all files) 2024-11-16T11:38:19,007 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dfc50fd7bc83de00085325d9d55f4778/info in TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 
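The RegionTooBusyException above ("Over memstore limit=32.0 K") is the region's write back-pressure: once the memstore grows past its blocking limit, puts are rejected until a flush drains it, and the client is expected to back off and retry (the AsyncRegionLocatorHelper lines show the async client handling exactly that). The sketch below shows the same idea from the application side with the standard HBase client; the table name and column family come from this test, while the row key, payload, and retry parameters are illustrative assumptions, and depending on client retry settings the busy signal may surface wrapped rather than as a direct RegionTooBusyException.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Sketch of retrying a put when a region reports memstore back-pressure, as in
 * the RegionTooBusyException above. The HBase client normally retries this on
 * its own, so this only makes the backoff explicit for illustration. Row key,
 * payload, and retry settings are assumptions, not taken from the test.
 */
public class BackoffPutSketch {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      Put put = new Put(Bytes.toBytes("row0200"));                        // illustrative row key
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), new byte[1024]);
      putWithBackoff(table, put, 5, 100L);
    }
  }

  /** Retry a put with exponential backoff while the region signals it is too busy. */
  static void putWithBackoff(Table table, Put put, int maxAttempts, long initialSleepMs)
      throws IOException, InterruptedException {
    long sleepMs = initialSleepMs;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (IOException e) {
        // The busy signal may arrive directly or wrapped, so walk the cause chain.
        if (!isRegionTooBusy(e) || attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(sleepMs);
        sleepMs *= 2;  // back off before the next attempt, giving the flush time to drain the memstore
      }
    }
  }

  private static boolean isRegionTooBusy(Throwable t) {
    while (t != null) {
      if (t instanceof RegionTooBusyException) {
        return true;
      }
      t = t.getCause();
    }
    return false;
  }
}
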
2024-11-16T11:38:19,008 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/2882f464febd479c946236e0b4059443, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/5dbc3a1885fa49ff96d00a7895a93f9e, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/3d8ae8fa551c4e48abfc5a6f02a62293] into tmpdir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp, totalSize=133.2 K 2024-11-16T11:38:19,008 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2882f464febd479c946236e0b4059443, keycount=92, bloomtype=ROW, size=102.4 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1731757080570 2024-11-16T11:38:19,008 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5dbc3a1885fa49ff96d00a7895a93f9e, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731757096918 2024-11-16T11:38:19,008 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3d8ae8fa551c4e48abfc5a6f02a62293, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1731757098935 2024-11-16T11:38:19,018 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dfc50fd7bc83de00085325d9d55f4778#info#compaction#84 average throughput is 57.46 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:38:19,019 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/ba731d86e1b24432a9e011bc0ba5d8b7 is 1080, key is row0062/info:/1731757080570/Put/seqid=0 2024-11-16T11:38:19,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741868_1044 (size=126685) 2024-11-16T11:38:19,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741868_1044 (size=126685) 2024-11-16T11:38:19,030 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/ba731d86e1b24432a9e011bc0ba5d8b7 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/ba731d86e1b24432a9e011bc0ba5d8b7 2024-11-16T11:38:19,037 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dfc50fd7bc83de00085325d9d55f4778/info of dfc50fd7bc83de00085325d9d55f4778 into ba731d86e1b24432a9e011bc0ba5d8b7(size=123.7 K), total size for store is 123.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T11:38:19,037 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:19,037 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., storeName=dfc50fd7bc83de00085325d9d55f4778/info, priority=13, startTime=1731757099006; duration=0sec 2024-11-16T11:38:19,037 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:19,037 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dfc50fd7bc83de00085325d9d55f4778:info 2024-11-16T11:38:19,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:19,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:20,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:20,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:21,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:21,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:22,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:22,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:23,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:23,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:24,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:24,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:25,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:25,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:26,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:26,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:27,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:27,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:28,333 INFO [master/a7948fca2832:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T11:38:28,333 INFO [master/a7948fca2832:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T11:38:28,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:28,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:29,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:29,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-16T11:38:29,048 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/ef2e80bf0e1849c0970ea714bb3ad9aa is 1080, key is row0174/info:/1731757098974/Put/seqid=0 2024-11-16T11:38:29,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741869_1045 (size=24394) 2024-11-16T11:38:29,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741869_1045 (size=24394) 2024-11-16T11:38:29,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/ef2e80bf0e1849c0970ea714bb3ad9aa 2024-11-16T11:38:29,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/ef2e80bf0e1849c0970ea714bb3ad9aa as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/ef2e80bf0e1849c0970ea714bb3ad9aa 2024-11-16T11:38:29,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/ef2e80bf0e1849c0970ea714bb3ad9aa, entries=18, sequenceid=254, filesize=23.8 K 2024-11-16T11:38:29,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=1.05 KB/1076 for dfc50fd7bc83de00085325d9d55f4778 in 25ms, sequenceid=254, compaction requested=false 2024-11-16T11:38:29,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:29,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:29,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:30,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:38:30,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:38:31,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:31,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T11:38:31,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/08f0b14e16914108a05241a0abb9c168 is 1080, key is row0192/info:/1731757109041/Put/seqid=0 2024-11-16T11:38:31,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741870_1046 (size=12523) 2024-11-16T11:38:31,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741870_1046 (size=12523) 2024-11-16T11:38:31,076 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/08f0b14e16914108a05241a0abb9c168 2024-11-16T11:38:31,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/08f0b14e16914108a05241a0abb9c168 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/08f0b14e16914108a05241a0abb9c168 2024-11-16T11:38:31,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/08f0b14e16914108a05241a0abb9c168, entries=7, sequenceid=264, filesize=12.2 K 2024-11-16T11:38:31,089 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for dfc50fd7bc83de00085325d9d55f4778 in 26ms, sequenceid=264, compaction requested=true 2024-11-16T11:38:31,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:31,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:31,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dfc50fd7bc83de00085325d9d55f4778:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T11:38:31,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:31,090 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T11:38:31,090 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-16T11:38:31,091 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 163602 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T11:38:31,091 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1541): dfc50fd7bc83de00085325d9d55f4778/info is initiating minor compaction (all files) 2024-11-16T11:38:31,091 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dfc50fd7bc83de00085325d9d55f4778/info in TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 2024-11-16T11:38:31,091 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/ba731d86e1b24432a9e011bc0ba5d8b7, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/ef2e80bf0e1849c0970ea714bb3ad9aa, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/08f0b14e16914108a05241a0abb9c168] into tmpdir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp, totalSize=159.8 K 2024-11-16T11:38:31,091 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting ba731d86e1b24432a9e011bc0ba5d8b7, keycount=112, bloomtype=ROW, size=123.7 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1731757080570 2024-11-16T11:38:31,092 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting ef2e80bf0e1849c0970ea714bb3ad9aa, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1731757098974 2024-11-16T11:38:31,092 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 08f0b14e16914108a05241a0abb9c168, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1731757109041 2024-11-16T11:38:31,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/a5c397ac65114204a14ac6ba77f581ed is 1080, key is row0199/info:/1731757111064/Put/seqid=0 2024-11-16T11:38:31,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741871_1047 (size=21171) 2024-11-16T11:38:31,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741871_1047 (size=21171) 2024-11-16T11:38:31,100 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=282 (bloomFilter=true), 
to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/a5c397ac65114204a14ac6ba77f581ed 2024-11-16T11:38:31,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/a5c397ac65114204a14ac6ba77f581ed as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/a5c397ac65114204a14ac6ba77f581ed 2024-11-16T11:38:31,107 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dfc50fd7bc83de00085325d9d55f4778#info#compaction#88 average throughput is 46.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:38:31,108 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/9d43e7efb3a14847993637d6fe13d924 is 1080, key is row0062/info:/1731757080570/Put/seqid=0 2024-11-16T11:38:31,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/a5c397ac65114204a14ac6ba77f581ed, entries=15, sequenceid=282, filesize=20.7 K 2024-11-16T11:38:31,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for dfc50fd7bc83de00085325d9d55f4778 in 22ms, sequenceid=282, compaction requested=false 2024-11-16T11:38:31,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:31,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:31,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T11:38:31,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741872_1048 (size=153817) 2024-11-16T11:38:31,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741872_1048 (size=153817) 2024-11-16T11:38:31,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/fc0be3f208e34fb393e734b723ee182d is 1080, key is row0214/info:/1731757111091/Put/seqid=0 2024-11-16T11:38:31,119 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/9d43e7efb3a14847993637d6fe13d924 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/9d43e7efb3a14847993637d6fe13d924 2024-11-16T11:38:31,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741873_1049 (size=16839) 2024-11-16T11:38:31,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741873_1049 (size=16839) 2024-11-16T11:38:31,123 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/fc0be3f208e34fb393e734b723ee182d 2024-11-16T11:38:31,126 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dfc50fd7bc83de00085325d9d55f4778/info of dfc50fd7bc83de00085325d9d55f4778 into 9d43e7efb3a14847993637d6fe13d924(size=150.2 K), total size for store is 170.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T11:38:31,126 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:31,126 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., storeName=dfc50fd7bc83de00085325d9d55f4778/info, priority=13, startTime=1731757111090; duration=0sec 2024-11-16T11:38:31,126 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:31,126 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dfc50fd7bc83de00085325d9d55f4778:info 2024-11-16T11:38:31,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/fc0be3f208e34fb393e734b723ee182d as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fc0be3f208e34fb393e734b723ee182d 2024-11-16T11:38:31,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fc0be3f208e34fb393e734b723ee182d, entries=11, sequenceid=296, filesize=16.4 K 2024-11-16T11:38:31,134 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=0 B/0 for dfc50fd7bc83de00085325d9d55f4778 in 21ms, sequenceid=296, compaction requested=true 2024-11-16T11:38:31,134 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:31,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dfc50fd7bc83de00085325d9d55f4778:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T11:38:31,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:31,134 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T11:38:31,135 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 191827 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T11:38:31,135 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1541): dfc50fd7bc83de00085325d9d55f4778/info is initiating minor compaction (all files) 2024-11-16T11:38:31,135 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dfc50fd7bc83de00085325d9d55f4778/info in TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 2024-11-16T11:38:31,135 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/9d43e7efb3a14847993637d6fe13d924, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/a5c397ac65114204a14ac6ba77f581ed, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fc0be3f208e34fb393e734b723ee182d] into tmpdir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp, totalSize=187.3 K 2024-11-16T11:38:31,135 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9d43e7efb3a14847993637d6fe13d924, keycount=137, bloomtype=ROW, size=150.2 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1731757080570 2024-11-16T11:38:31,136 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting a5c397ac65114204a14ac6ba77f581ed, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1731757111064 2024-11-16T11:38:31,136 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting fc0be3f208e34fb393e734b723ee182d, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1731757111091 2024-11-16T11:38:31,147 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dfc50fd7bc83de00085325d9d55f4778#info#compaction#90 average throughput is 83.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:38:31,148 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/7d6b2d2f20a84a6aa183c06468d7009d is 1080, key is row0062/info:/1731757080570/Put/seqid=0 2024-11-16T11:38:31,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741874_1050 (size=181981) 2024-11-16T11:38:31,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741874_1050 (size=181981) 2024-11-16T11:38:31,155 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/7d6b2d2f20a84a6aa183c06468d7009d as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/7d6b2d2f20a84a6aa183c06468d7009d 2024-11-16T11:38:31,160 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dfc50fd7bc83de00085325d9d55f4778/info of dfc50fd7bc83de00085325d9d55f4778 into 7d6b2d2f20a84a6aa183c06468d7009d(size=177.7 K), total size for store is 177.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T11:38:31,160 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:31,160 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., storeName=dfc50fd7bc83de00085325d9d55f4778/info, priority=13, startTime=1731757111134; duration=0sec 2024-11-16T11:38:31,160 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:31,160 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dfc50fd7bc83de00085325d9d55f4778:info 2024-11-16T11:38:31,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:31,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:32,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:32,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:33,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:33,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T11:38:33,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/9395145bde4e46c397585d3197915343 is 1080, key is row0225/info:/1731757113115/Put/seqid=0 2024-11-16T11:38:33,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741875_1051 (size=12523) 2024-11-16T11:38:33,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741875_1051 (size=12523) 2024-11-16T11:38:33,138 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/9395145bde4e46c397585d3197915343 2024-11-16T11:38:33,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/9395145bde4e46c397585d3197915343 as 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/9395145bde4e46c397585d3197915343 2024-11-16T11:38:33,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/9395145bde4e46c397585d3197915343, entries=7, sequenceid=308, filesize=12.2 K 2024-11-16T11:38:33,149 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for dfc50fd7bc83de00085325d9d55f4778 in 21ms, sequenceid=308, compaction requested=false 2024-11-16T11:38:33,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:33,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:33,150 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T11:38:33,154 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/e03a34ff9c3e489f8d7f4a4bf1261987 is 1080, key is row0232/info:/1731757113129/Put/seqid=0 2024-11-16T11:38:33,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741876_1052 (size=16839) 2024-11-16T11:38:33,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741876_1052 (size=16839) 2024-11-16T11:38:33,160 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/e03a34ff9c3e489f8d7f4a4bf1261987 2024-11-16T11:38:33,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/e03a34ff9c3e489f8d7f4a4bf1261987 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/e03a34ff9c3e489f8d7f4a4bf1261987 2024-11-16T11:38:33,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/e03a34ff9c3e489f8d7f4a4bf1261987, entries=11, sequenceid=322, filesize=16.4 K 2024-11-16T11:38:33,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for dfc50fd7bc83de00085325d9d55f4778 in 22ms, sequenceid=322, compaction requested=true 2024-11-16T11:38:33,172 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:33,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dfc50fd7bc83de00085325d9d55f4778:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T11:38:33,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:33,172 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T11:38:33,173 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 211343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T11:38:33,173 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1541): dfc50fd7bc83de00085325d9d55f4778/info is initiating minor compaction (all files) 2024-11-16T11:38:33,174 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dfc50fd7bc83de00085325d9d55f4778/info in TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 2024-11-16T11:38:33,174 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/7d6b2d2f20a84a6aa183c06468d7009d, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/9395145bde4e46c397585d3197915343, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/e03a34ff9c3e489f8d7f4a4bf1261987] into tmpdir=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp, totalSize=206.4 K 2024-11-16T11:38:33,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45505 {}] regionserver.HRegion(8855): Flush requested on dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:33,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T11:38:33,174 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7d6b2d2f20a84a6aa183c06468d7009d, keycount=163, bloomtype=ROW, size=177.7 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1731757080570 2024-11-16T11:38:33,175 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9395145bde4e46c397585d3197915343, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1731757113115 2024-11-16T11:38:33,175 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] compactions.Compactor(225): Compacting e03a34ff9c3e489f8d7f4a4bf1261987, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1731757113129 2024-11-16T11:38:33,180 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/b375c22bb80d44589d4c0d0cbfc527fc is 1080, key is row0243/info:/1731757113151/Put/seqid=0 2024-11-16T11:38:33,187 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dfc50fd7bc83de00085325d9d55f4778#info#compaction#94 average throughput is 61.91 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T11:38:33,188 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/17b32f540c9946829b945d808d171682 is 1080, key is row0062/info:/1731757080570/Put/seqid=0 2024-11-16T11:38:33,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741877_1053 (size=17918) 2024-11-16T11:38:33,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741877_1053 (size=17918) 2024-11-16T11:38:33,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741878_1054 (size=201509) 2024-11-16T11:38:33,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741878_1054 (size=201509) 2024-11-16T11:38:33,199 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/17b32f540c9946829b945d808d171682 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/17b32f540c9946829b945d808d171682 2024-11-16T11:38:33,204 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dfc50fd7bc83de00085325d9d55f4778/info of dfc50fd7bc83de00085325d9d55f4778 into 17b32f540c9946829b945d808d171682(size=196.8 K), total size for store is 196.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-16T11:38:33,204 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:33,204 INFO [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., storeName=dfc50fd7bc83de00085325d9d55f4778/info, priority=13, startTime=1731757113172; duration=0sec 2024-11-16T11:38:33,204 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T11:38:33,204 DEBUG [RS:0;a7948fca2832:45505-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dfc50fd7bc83de00085325d9d55f4778:info 2024-11-16T11:38:33,252 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-16T11:38:33,586 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/b375c22bb80d44589d4c0d0cbfc527fc 2024-11-16T11:38:33,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/b375c22bb80d44589d4c0d0cbfc527fc as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/b375c22bb80d44589d4c0d0cbfc527fc 2024-11-16T11:38:33,601 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/b375c22bb80d44589d4c0d0cbfc527fc, entries=12, sequenceid=337, filesize=17.5 K 2024-11-16T11:38:33,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=2.10 KB/2152 for dfc50fd7bc83de00085325d9d55f4778 in 428ms, sequenceid=337, compaction requested=false 2024-11-16T11:38:33,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:33,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:33,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:34,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:34,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:35,180 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-16T11:38:35,180 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C45505%2C1731757067235.1731757115180 2024-11-16T11:38:35,190 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,190 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,190 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,190 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,190 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,191 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/WALs/a7948fca2832,45505,1731757067235/a7948fca2832%2C45505%2C1731757067235.1731757067865 with entries=318, filesize=310.28 KB; new WAL /user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/WALs/a7948fca2832,45505,1731757067235/a7948fca2832%2C45505%2C1731757067235.1731757115180 2024-11-16T11:38:35,192 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38709:38709),(127.0.0.1/127.0.0.1:40855:40855)] 2024-11-16T11:38:35,192 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/WALs/a7948fca2832,45505,1731757067235/a7948fca2832%2C45505%2C1731757067235.1731757067865 is not closed yet, will try archiving it next time 2024-11-16T11:38:35,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741833_1009 (size=317731) 2024-11-16T11:38:35,193 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741833_1009 (size=317731) 2024-11-16T11:38:35,196 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing dfc50fd7bc83de00085325d9d55f4778 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-11-16T11:38:35,199 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/9e5a090179914f9eb076e0f10d06b4bb is 1080, key is row0255/info:/1731757113175/Put/seqid=0 2024-11-16T11:38:35,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741880_1056 (size=7116) 2024-11-16T11:38:35,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741880_1056 (size=7116) 2024-11-16T11:38:35,204 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/9e5a090179914f9eb076e0f10d06b4bb 2024-11-16T11:38:35,209 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/.tmp/info/9e5a090179914f9eb076e0f10d06b4bb as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/9e5a090179914f9eb076e0f10d06b4bb 2024-11-16T11:38:35,213 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/9e5a090179914f9eb076e0f10d06b4bb, entries=2, sequenceid=343, filesize=6.9 K 2024-11-16T11:38:35,214 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for dfc50fd7bc83de00085325d9d55f4778 in 18ms, sequenceid=343, compaction requested=true 2024-11-16T11:38:35,214 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for dfc50fd7bc83de00085325d9d55f4778: 2024-11-16T11:38:35,214 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 54197fbe674671b991ac1fbcec8165bd: 2024-11-16T11:38:35,214 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-16T11:38:35,218 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/.tmp/info/b6479a2c64e14e61abe265c29e4c5a3b is 193, key is TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778./info:regioninfo/1731757081736/Put/seqid=0 2024-11-16T11:38:35,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741881_1057 (size=6223) 2024-11-16T11:38:35,223 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741881_1057 (size=6223) 2024-11-16T11:38:35,223 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/.tmp/info/b6479a2c64e14e61abe265c29e4c5a3b 2024-11-16T11:38:35,228 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/.tmp/info/b6479a2c64e14e61abe265c29e4c5a3b as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/info/b6479a2c64e14e61abe265c29e4c5a3b 2024-11-16T11:38:35,233 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/info/b6479a2c64e14e61abe265c29e4c5a3b, entries=5, sequenceid=21, filesize=6.1 K 2024-11-16T11:38:35,234 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 20ms, sequenceid=21, compaction requested=false 2024-11-16T11:38:35,234 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T11:38:35,235 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C45505%2C1731757067235.1731757115234 2024-11-16T11:38:35,239 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,239 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,239 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,239 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,239 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,239 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/WALs/a7948fca2832,45505,1731757067235/a7948fca2832%2C45505%2C1731757067235.1731757115180 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/WALs/a7948fca2832,45505,1731757067235/a7948fca2832%2C45505%2C1731757067235.1731757115234 2024-11-16T11:38:35,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741879_1055 (size=731) 2024-11-16T11:38:35,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741879_1055 (size=731) 2024-11-16T11:38:35,244 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40855:40855),(127.0.0.1/127.0.0.1:38709:38709)] 2024-11-16T11:38:35,246 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/WALs/a7948fca2832,45505,1731757067235/a7948fca2832%2C45505%2C1731757067235.1731757067865 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/oldWALs/a7948fca2832%2C45505%2C1731757067235.1731757067865 2024-11-16T11:38:35,246 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T11:38:35,246 
INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T11:38:35,246 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T11:38:35,247 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:38:35,247 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:38:35,247 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/WALs/a7948fca2832,45505,1731757067235/a7948fca2832%2C45505%2C1731757067235.1731757115180 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/oldWALs/a7948fca2832%2C45505%2C1731757067235.1731757115180 2024-11-16T11:38:35,247 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:38:35,247 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T11:38:35,247 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T11:38:35,247 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=916726640, stopped=false 2024-11-16T11:38:35,247 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a7948fca2832,44903,1731757067063 2024-11-16T11:38:35,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T11:38:35,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T11:38:35,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:35,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:35,309 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T11:38:35,309 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T11:38:35,309 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:38:35,309 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:38:35,309 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'a7948fca2832,45505,1731757067235' ***** 2024-11-16T11:38:35,309 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:38:35,309 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T11:38:35,309 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:38:35,309 INFO [RS:0;a7948fca2832:45505 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T11:38:35,309 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T11:38:35,309 INFO [RS:0;a7948fca2832:45505 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T11:38:35,310 INFO [RS:0;a7948fca2832:45505 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T11:38:35,310 INFO [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(3091): Received CLOSE for dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:35,310 INFO [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(3091): Received CLOSE for 54197fbe674671b991ac1fbcec8165bd 2024-11-16T11:38:35,310 INFO [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(959): stopping server a7948fca2832,45505,1731757067235 2024-11-16T11:38:35,310 INFO [RS:0;a7948fca2832:45505 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T11:38:35,310 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing dfc50fd7bc83de00085325d9d55f4778, disabling compactions & flushes 2024-11-16T11:38:35,310 INFO [RS:0;a7948fca2832:45505 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a7948fca2832:45505. 2024-11-16T11:38:35,310 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 2024-11-16T11:38:35,310 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 
2024-11-16T11:38:35,310 DEBUG [RS:0;a7948fca2832:45505 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:38:35,310 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. after waiting 0 ms 2024-11-16T11:38:35,310 DEBUG [RS:0;a7948fca2832:45505 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:38:35,310 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 2024-11-16T11:38:35,310 INFO [RS:0;a7948fca2832:45505 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T11:38:35,310 INFO [RS:0;a7948fca2832:45505 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T11:38:35,310 INFO [RS:0;a7948fca2832:45505 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T11:38:35,310 INFO [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T11:38:35,310 INFO [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-16T11:38:35,310 DEBUG [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(1325): Online Regions={dfc50fd7bc83de00085325d9d55f4778=TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778., 54197fbe674671b991ac1fbcec8165bd=TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd., 1588230740=hbase:meta,,1.1588230740} 2024-11-16T11:38:35,310 DEBUG [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 54197fbe674671b991ac1fbcec8165bd, dfc50fd7bc83de00085325d9d55f4778 2024-11-16T11:38:35,310 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T11:38:35,310 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T11:38:35,310 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T11:38:35,310 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T11:38:35,310 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T11:38:35,310 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4->hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/04f0aca1348143449c818a04ba5ff9c5-top, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/29fdc84c0fd541a89b9b909ecd0f32cc, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/TestLogRolling-testLogRolling=ffee239c077be3582f21707b18f768b4-58d714e256e34f1bb4ed1f4c39fa27d0, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/260c9b9b962447f6af2a12d3ddaf6b95, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/54336d6cb7e94cb8a47fe25b9be85b8e, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/1ccd5242bbb348a69290767c6ad510ce, 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/09084539bab340f0a30411ba3589ee3b, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/a263c307cbfb4790b5c334fea32cf380, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fdca348422124873ba5eb0701c12dbe5, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fcacb20fa45d4fd29646cd216356d8a0, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/f3c3a0991c5b47369c276c9e91e218a9, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/16c03c7d55d44340a5c8f9394b7135a5, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/23b07ead7bce418e94d3d8e3ce2a2322, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/2882f464febd479c946236e0b4059443, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/df8edb34fa0a4676909818f6af3f1c6b, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/5dbc3a1885fa49ff96d00a7895a93f9e, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/ba731d86e1b24432a9e011bc0ba5d8b7, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/3d8ae8fa551c4e48abfc5a6f02a62293, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/ef2e80bf0e1849c0970ea714bb3ad9aa, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/9d43e7efb3a14847993637d6fe13d924, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/08f0b14e16914108a05241a0abb9c168, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/a5c397ac65114204a14ac6ba77f581ed, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/7d6b2d2f20a84a6aa183c06468d7009d, 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fc0be3f208e34fb393e734b723ee182d, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/9395145bde4e46c397585d3197915343, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/e03a34ff9c3e489f8d7f4a4bf1261987] to archive 2024-11-16T11:38:35,312 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T11:38:35,314 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4 2024-11-16T11:38:35,315 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/29fdc84c0fd541a89b9b909ecd0f32cc to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/29fdc84c0fd541a89b9b909ecd0f32cc 2024-11-16T11:38:35,316 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-16T11:38:35,317 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/TestLogRolling-testLogRolling=ffee239c077be3582f21707b18f768b4-58d714e256e34f1bb4ed1f4c39fa27d0 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/TestLogRolling-testLogRolling=ffee239c077be3582f21707b18f768b4-58d714e256e34f1bb4ed1f4c39fa27d0 2024-11-16T11:38:35,317 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T11:38:35,317 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T11:38:35,317 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731757115310Running coprocessor pre-close hooks at 1731757115310Disabling compacts and flushes for region at 1731757115310Disabling writes for close at 1731757115310Writing region close event to WAL at 1731757115313 (+3 ms)Running coprocessor post-close hooks at 1731757115317 (+4 ms)Closed at 1731757115317 2024-11-16T11:38:35,317 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T11:38:35,318 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/260c9b9b962447f6af2a12d3ddaf6b95 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/260c9b9b962447f6af2a12d3ddaf6b95 2024-11-16T11:38:35,319 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/54336d6cb7e94cb8a47fe25b9be85b8e to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/54336d6cb7e94cb8a47fe25b9be85b8e 2024-11-16T11:38:35,321 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/1ccd5242bbb348a69290767c6ad510ce to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/1ccd5242bbb348a69290767c6ad510ce 2024-11-16T11:38:35,322 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/09084539bab340f0a30411ba3589ee3b to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/09084539bab340f0a30411ba3589ee3b 2024-11-16T11:38:35,323 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/a263c307cbfb4790b5c334fea32cf380 to 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/a263c307cbfb4790b5c334fea32cf380 2024-11-16T11:38:35,324 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fdca348422124873ba5eb0701c12dbe5 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fdca348422124873ba5eb0701c12dbe5 2024-11-16T11:38:35,325 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fcacb20fa45d4fd29646cd216356d8a0 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fcacb20fa45d4fd29646cd216356d8a0 2024-11-16T11:38:35,326 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/f3c3a0991c5b47369c276c9e91e218a9 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/f3c3a0991c5b47369c276c9e91e218a9 2024-11-16T11:38:35,327 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/16c03c7d55d44340a5c8f9394b7135a5 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/16c03c7d55d44340a5c8f9394b7135a5 2024-11-16T11:38:35,328 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/23b07ead7bce418e94d3d8e3ce2a2322 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/23b07ead7bce418e94d3d8e3ce2a2322 2024-11-16T11:38:35,330 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/2882f464febd479c946236e0b4059443 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/2882f464febd479c946236e0b4059443 2024-11-16T11:38:35,331 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/df8edb34fa0a4676909818f6af3f1c6b to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/df8edb34fa0a4676909818f6af3f1c6b 2024-11-16T11:38:35,332 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/5dbc3a1885fa49ff96d00a7895a93f9e to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/5dbc3a1885fa49ff96d00a7895a93f9e 2024-11-16T11:38:35,333 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/ba731d86e1b24432a9e011bc0ba5d8b7 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/ba731d86e1b24432a9e011bc0ba5d8b7 2024-11-16T11:38:35,334 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/3d8ae8fa551c4e48abfc5a6f02a62293 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/3d8ae8fa551c4e48abfc5a6f02a62293 2024-11-16T11:38:35,335 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/ef2e80bf0e1849c0970ea714bb3ad9aa to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/ef2e80bf0e1849c0970ea714bb3ad9aa 2024-11-16T11:38:35,336 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/9d43e7efb3a14847993637d6fe13d924 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/9d43e7efb3a14847993637d6fe13d924 2024-11-16T11:38:35,337 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/08f0b14e16914108a05241a0abb9c168 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/08f0b14e16914108a05241a0abb9c168 2024-11-16T11:38:35,338 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/a5c397ac65114204a14ac6ba77f581ed to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/a5c397ac65114204a14ac6ba77f581ed 2024-11-16T11:38:35,340 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/7d6b2d2f20a84a6aa183c06468d7009d to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/7d6b2d2f20a84a6aa183c06468d7009d 2024-11-16T11:38:35,341 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fc0be3f208e34fb393e734b723ee182d to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/fc0be3f208e34fb393e734b723ee182d 2024-11-16T11:38:35,342 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/9395145bde4e46c397585d3197915343 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/9395145bde4e46c397585d3197915343 2024-11-16T11:38:35,343 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/e03a34ff9c3e489f8d7f4a4bf1261987 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/info/e03a34ff9c3e489f8d7f4a4bf1261987 2024-11-16T11:38:35,344 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=a7948fca2832:44903 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-16T11:38:35,344 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [29fdc84c0fd541a89b9b909ecd0f32cc=8359, 260c9b9b962447f6af2a12d3ddaf6b95=12509, 54336d6cb7e94cb8a47fe25b9be85b8e=28952, 1ccd5242bbb348a69290767c6ad510ce=17894, 09084539bab340f0a30411ba3589ee3b=20066, a263c307cbfb4790b5c334fea32cf380=56032, fdca348422124873ba5eb0701c12dbe5=16828, fcacb20fa45d4fd29646cd216356d8a0=16828, f3c3a0991c5b47369c276c9e91e218a9=78909, 16c03c7d55d44340a5c8f9394b7135a5=15750, 23b07ead7bce418e94d3d8e3ce2a2322=17906, 2882f464febd479c946236e0b4059443=104891, df8edb34fa0a4676909818f6af3f1c6b=17906, 5dbc3a1885fa49ff96d00a7895a93f9e=13594, ba731d86e1b24432a9e011bc0ba5d8b7=126685, 3d8ae8fa551c4e48abfc5a6f02a62293=17906, ef2e80bf0e1849c0970ea714bb3ad9aa=24394, 9d43e7efb3a14847993637d6fe13d924=153817, 08f0b14e16914108a05241a0abb9c168=12523, a5c397ac65114204a14ac6ba77f581ed=21171, 7d6b2d2f20a84a6aa183c06468d7009d=181981, fc0be3f208e34fb393e734b723ee182d=16839, 9395145bde4e46c397585d3197915343=12523, e03a34ff9c3e489f8d7f4a4bf1261987=16839] 2024-11-16T11:38:35,348 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/dfc50fd7bc83de00085325d9d55f4778/recovered.edits/346.seqid, newMaxSeqId=346, maxSeqId=88 2024-11-16T11:38:35,348 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 2024-11-16T11:38:35,348 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for dfc50fd7bc83de00085325d9d55f4778: Waiting for close lock at 1731757115310Running coprocessor pre-close hooks at 1731757115310Disabling compacts and flushes for region at 1731757115310Disabling writes for close at 1731757115310Writing region close event to WAL at 1731757115345 (+35 ms)Running coprocessor post-close hooks at 1731757115348 (+3 ms)Closed at 1731757115348 2024-11-16T11:38:35,348 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731757081050.dfc50fd7bc83de00085325d9d55f4778. 
2024-11-16T11:38:35,348 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 54197fbe674671b991ac1fbcec8165bd, disabling compactions & flushes 2024-11-16T11:38:35,349 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd. 2024-11-16T11:38:35,349 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd. 2024-11-16T11:38:35,349 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd. after waiting 0 ms 2024-11-16T11:38:35,349 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd. 2024-11-16T11:38:35,349 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/54197fbe674671b991ac1fbcec8165bd/info/04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4->hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/ffee239c077be3582f21707b18f768b4/info/04f0aca1348143449c818a04ba5ff9c5-bottom] to archive 2024-11-16T11:38:35,350 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T11:38:35,351 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/54197fbe674671b991ac1fbcec8165bd/info/04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4 to hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/archive/data/default/TestLogRolling-testLogRolling/54197fbe674671b991ac1fbcec8165bd/info/04f0aca1348143449c818a04ba5ff9c5.ffee239c077be3582f21707b18f768b4 2024-11-16T11:38:35,351 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-16T11:38:35,354 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/data/default/TestLogRolling-testLogRolling/54197fbe674671b991ac1fbcec8165bd/recovered.edits/93.seqid, newMaxSeqId=93, maxSeqId=88 2024-11-16T11:38:35,354 INFO [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd. 
2024-11-16T11:38:35,354 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 54197fbe674671b991ac1fbcec8165bd: Waiting for close lock at 1731757115348Running coprocessor pre-close hooks at 1731757115348Disabling compacts and flushes for region at 1731757115348Disabling writes for close at 1731757115349 (+1 ms)Writing region close event to WAL at 1731757115351 (+2 ms)Running coprocessor post-close hooks at 1731757115354 (+3 ms)Closed at 1731757115354 2024-11-16T11:38:35,354 DEBUG [RS_CLOSE_REGION-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731757081050.54197fbe674671b991ac1fbcec8165bd. 2024-11-16T11:38:35,510 INFO [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(976): stopping server a7948fca2832,45505,1731757067235; all regions closed. 2024-11-16T11:38:35,511 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,511 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,511 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,511 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,511 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741834_1010 (size=8107) 2024-11-16T11:38:35,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741834_1010 (size=8107) 2024-11-16T11:38:35,516 DEBUG [RS:0;a7948fca2832:45505 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/oldWALs 2024-11-16T11:38:35,516 INFO [RS:0;a7948fca2832:45505 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C45505%2C1731757067235.meta:.meta(num 1731757068245) 2024-11-16T11:38:35,516 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,516 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,516 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,516 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,516 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741882_1058 (size=778) 2024-11-16T11:38:35,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741882_1058 (size=778) 2024-11-16T11:38:35,520 DEBUG [RS:0;a7948fca2832:45505 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/oldWALs 2024-11-16T11:38:35,520 INFO [RS:0;a7948fca2832:45505 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C45505%2C1731757067235:(num 1731757115234) 2024-11-16T11:38:35,520 DEBUG [RS:0;a7948fca2832:45505 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:38:35,520 INFO [RS:0;a7948fca2832:45505 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T11:38:35,520 INFO [RS:0;a7948fca2832:45505 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T11:38:35,520 INFO [RS:0;a7948fca2832:45505 {}] hbase.ChoreService(370): Chore service for: 
regionserver/a7948fca2832:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T11:38:35,520 INFO [RS:0;a7948fca2832:45505 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T11:38:35,521 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T11:38:35,521 INFO [RS:0;a7948fca2832:45505 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45505 2024-11-16T11:38:35,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7948fca2832,45505,1731757067235 2024-11-16T11:38:35,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:38:35,529 INFO [RS:0;a7948fca2832:45505 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T11:38:35,540 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7948fca2832,45505,1731757067235] 2024-11-16T11:38:35,550 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7948fca2832,45505,1731757067235 already deleted, retry=false 2024-11-16T11:38:35,550 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7948fca2832,45505,1731757067235 expired; onlineServers=0 2024-11-16T11:38:35,551 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a7948fca2832,44903,1731757067063' ***** 2024-11-16T11:38:35,551 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T11:38:35,551 INFO [M:0;a7948fca2832:44903 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T11:38:35,551 INFO [M:0;a7948fca2832:44903 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T11:38:35,551 DEBUG [M:0;a7948fca2832:44903 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T11:38:35,551 DEBUG [M:0;a7948fca2832:44903 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T11:38:35,551 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T11:38:35,551 DEBUG [master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731757067583 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731757067583,5,FailOnTimeoutGroup] 2024-11-16T11:38:35,551 DEBUG [master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731757067584 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731757067584,5,FailOnTimeoutGroup] 2024-11-16T11:38:35,551 INFO [M:0;a7948fca2832:44903 {}] hbase.ChoreService(370): Chore service for: master/a7948fca2832:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T11:38:35,551 INFO [M:0;a7948fca2832:44903 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T11:38:35,551 DEBUG [M:0;a7948fca2832:44903 {}] master.HMaster(1795): Stopping service threads 2024-11-16T11:38:35,551 INFO [M:0;a7948fca2832:44903 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T11:38:35,552 INFO [M:0;a7948fca2832:44903 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T11:38:35,552 INFO [M:0;a7948fca2832:44903 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T11:38:35,552 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T11:38:35,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T11:38:35,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:35,561 DEBUG [M:0;a7948fca2832:44903 {}] zookeeper.ZKUtil(347): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T11:38:35,561 WARN [M:0;a7948fca2832:44903 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T11:38:35,562 INFO [M:0;a7948fca2832:44903 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/.lastflushedseqids 2024-11-16T11:38:35,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741883_1059 (size=228) 2024-11-16T11:38:35,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741883_1059 (size=228) 2024-11-16T11:38:35,567 INFO [M:0;a7948fca2832:44903 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T11:38:35,567 INFO [M:0;a7948fca2832:44903 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T11:38:35,567 DEBUG [M:0;a7948fca2832:44903 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T11:38:35,567 INFO [M:0;a7948fca2832:44903 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:38:35,567 DEBUG [M:0;a7948fca2832:44903 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:38:35,567 DEBUG [M:0;a7948fca2832:44903 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T11:38:35,567 DEBUG [M:0;a7948fca2832:44903 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:38:35,568 INFO [M:0;a7948fca2832:44903 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-16T11:38:35,583 DEBUG [M:0;a7948fca2832:44903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a396567b48a24ec08ba10e2a150e1560 is 82, key is hbase:meta,,1/info:regioninfo/1731757068267/Put/seqid=0 2024-11-16T11:38:35,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741884_1060 (size=5672) 2024-11-16T11:38:35,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741884_1060 (size=5672) 2024-11-16T11:38:35,587 INFO [M:0;a7948fca2832:44903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a396567b48a24ec08ba10e2a150e1560 2024-11-16T11:38:35,607 DEBUG [M:0;a7948fca2832:44903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aff594752d674e69a914202a902995c9 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731757068747/Put/seqid=0 2024-11-16T11:38:35,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741885_1061 (size=7090) 2024-11-16T11:38:35,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741885_1061 (size=7090) 2024-11-16T11:38:35,612 INFO [M:0;a7948fca2832:44903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aff594752d674e69a914202a902995c9 2024-11-16T11:38:35,615 INFO [M:0;a7948fca2832:44903 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for aff594752d674e69a914202a902995c9 2024-11-16T11:38:35,631 DEBUG [M:0;a7948fca2832:44903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9759370541e84d4db9439218cce89345 is 69, key is a7948fca2832,45505,1731757067235/rs:state/1731757067707/Put/seqid=0 
2024-11-16T11:38:35,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741886_1062 (size=5156) 2024-11-16T11:38:35,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741886_1062 (size=5156) 2024-11-16T11:38:35,635 INFO [M:0;a7948fca2832:44903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9759370541e84d4db9439218cce89345 2024-11-16T11:38:35,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:38:35,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45505-0x101436fc6cc0001, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:38:35,640 INFO [RS:0;a7948fca2832:45505 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T11:38:35,640 INFO [RS:0;a7948fca2832:45505 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7948fca2832,45505,1731757067235; zookeeper connection closed. 2024-11-16T11:38:35,641 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7767cba9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7767cba9 2024-11-16T11:38:35,641 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T11:38:35,652 DEBUG [M:0;a7948fca2832:44903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/70168b64b4ec40e18926adb4658beb66 is 52, key is load_balancer_on/state:d/1731757068371/Put/seqid=0 2024-11-16T11:38:35,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741887_1063 (size=5056) 2024-11-16T11:38:35,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741887_1063 (size=5056) 2024-11-16T11:38:35,657 INFO [M:0;a7948fca2832:44903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/70168b64b4ec40e18926adb4658beb66 2024-11-16T11:38:35,661 DEBUG [M:0;a7948fca2832:44903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a396567b48a24ec08ba10e2a150e1560 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a396567b48a24ec08ba10e2a150e1560 2024-11-16T11:38:35,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:38:35,664 INFO [M:0;a7948fca2832:44903 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a396567b48a24ec08ba10e2a150e1560, entries=8, sequenceid=125, filesize=5.5 K 2024-11-16T11:38:35,665 DEBUG [M:0;a7948fca2832:44903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aff594752d674e69a914202a902995c9 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/aff594752d674e69a914202a902995c9 2024-11-16T11:38:35,670 INFO [M:0;a7948fca2832:44903 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for aff594752d674e69a914202a902995c9 2024-11-16T11:38:35,670 INFO [M:0;a7948fca2832:44903 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/aff594752d674e69a914202a902995c9, entries=13, sequenceid=125, filesize=6.9 K 2024-11-16T11:38:35,671 DEBUG [M:0;a7948fca2832:44903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9759370541e84d4db9439218cce89345 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9759370541e84d4db9439218cce89345 2024-11-16T11:38:35,676 INFO [M:0;a7948fca2832:44903 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9759370541e84d4db9439218cce89345, entries=1, sequenceid=125, filesize=5.0 K 2024-11-16T11:38:35,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:35,677 DEBUG [M:0;a7948fca2832:44903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/70168b64b4ec40e18926adb4658beb66 as hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/70168b64b4ec40e18926adb4658beb66 2024-11-16T11:38:35,681 INFO [M:0;a7948fca2832:44903 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36419/user/jenkins/test-data/e2d2ecf2-8bbd-bca3-7b66-cafeb87b8472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/70168b64b4ec40e18926adb4658beb66, entries=1, sequenceid=125, filesize=4.9 K 2024-11-16T11:38:35,682 INFO [M:0;a7948fca2832:44903 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=125, compaction requested=false 2024-11-16T11:38:35,684 INFO [M:0;a7948fca2832:44903 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:38:35,684 DEBUG [M:0;a7948fca2832:44903 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731757115567Disabling compacts and flushes for region at 1731757115567Disabling writes for close at 1731757115567Obtaining lock to block concurrent updates at 1731757115568 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731757115568Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1731757115568Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731757115569 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731757115569Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731757115583 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731757115583Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731757115592 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731757115607 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731757115607Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731757115616 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731757115630 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731757115630Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731757115639 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731757115652 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731757115652Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@320bc28f: reopening flushed file at 1731757115660 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@762aac9a: reopening flushed file at 1731757115664 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cba0eac: reopening flushed file at 1731757115670 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42a3df2a: reopening flushed file at 1731757115676 (+6 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=125, compaction requested=false at 1731757115682 (+6 ms)Writing region close event to WAL at 1731757115684 (+2 ms)Closed at 1731757115684 2024-11-16T11:38:35,684 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,684 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,684 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,685 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,685 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:35,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44187 is added to blk_1073741830_1006 (size=61320) 2024-11-16T11:38:35,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39767 is added to blk_1073741830_1006 (size=61320) 2024-11-16T11:38:35,687 INFO [M:0;a7948fca2832:44903 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T11:38:35,687 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T11:38:35,687 INFO [M:0;a7948fca2832:44903 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44903 2024-11-16T11:38:35,687 INFO [M:0;a7948fca2832:44903 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T11:38:35,730 INFO [regionserver/a7948fca2832:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T11:38:35,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:38:35,798 INFO [M:0;a7948fca2832:44903 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T11:38:35,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44903-0x101436fc6cc0000, quorum=127.0.0.1:55822, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T11:38:35,805 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55f5baef{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:38:35,806 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@737c094c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:38:35,807 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:38:35,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ea6ab9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:38:35,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49e2f900{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/hadoop.log.dir/,STOPPED} 2024-11-16T11:38:35,810 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T11:38:35,810 WARN [BP-1741585453-172.17.0.2-1731757064714 heartbeating to localhost/127.0.0.1:36419 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:38:35,810 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:38:35,810 WARN [BP-1741585453-172.17.0.2-1731757064714 heartbeating to localhost/127.0.0.1:36419 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1741585453-172.17.0.2-1731757064714 (Datanode Uuid 590a83ac-2b74-4e93-8bb6-e132d23538f3) service to localhost/127.0.0.1:36419 2024-11-16T11:38:35,811 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/cluster_ef38439f-ba15-e39a-5a9d-101dc9ba3f7f/data/data3/current/BP-1741585453-172.17.0.2-1731757064714 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:38:35,811 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/cluster_ef38439f-ba15-e39a-5a9d-101dc9ba3f7f/data/data4/current/BP-1741585453-172.17.0.2-1731757064714 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:38:35,812 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:38:35,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4d3496cb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:38:35,814 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ce23f2e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:38:35,814 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:38:35,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f79190{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:38:35,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@645803e5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/hadoop.log.dir/,STOPPED} 2024-11-16T11:38:35,815 WARN [BP-1741585453-172.17.0.2-1731757064714 heartbeating to localhost/127.0.0.1:36419 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T11:38:35,815 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T11:38:35,815 WARN [BP-1741585453-172.17.0.2-1731757064714 heartbeating to localhost/127.0.0.1:36419 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1741585453-172.17.0.2-1731757064714 (Datanode Uuid b6559877-6e76-4e9c-8c90-ae570e87e06f) service to localhost/127.0.0.1:36419 2024-11-16T11:38:35,815 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T11:38:35,816 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/cluster_ef38439f-ba15-e39a-5a9d-101dc9ba3f7f/data/data1/current/BP-1741585453-172.17.0.2-1731757064714 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:38:35,816 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/cluster_ef38439f-ba15-e39a-5a9d-101dc9ba3f7f/data/data2/current/BP-1741585453-172.17.0.2-1731757064714 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T11:38:35,816 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T11:38:35,822 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52f2ae6f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T11:38:35,822 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d3fac28{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T11:38:35,822 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T11:38:35,822 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fb22e39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T11:38:35,822 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@456bd1e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/hadoop.log.dir/,STOPPED} 2024-11-16T11:38:35,828 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T11:38:35,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T11:38:35,867 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=231 (was 205) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36419 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
IPC Parameter Sending Thread for localhost/127.0.0.1:36419 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36419 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:36419 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36419 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36419 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:36419 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:36419 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:36419 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36419 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=174 (was 236), ProcessCount=11 (was 11), AvailableMemoryMB=4465 (was 3671) - AvailableMemoryMB LEAK? 
- 2024-11-16T11:38:35,874 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=231, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=174, ProcessCount=11, AvailableMemoryMB=4465 2024-11-16T11:38:35,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T11:38:35,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/hadoop.log.dir so I do NOT create it in target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c 2024-11-16T11:38:35,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b12ef9f0-7a3f-f285-9d2e-e3f95a53d5a2/hadoop.tmp.dir so I do NOT create it in target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c 2024-11-16T11:38:35,874 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/cluster_534d0645-9ce3-b1ed-6c2b-fe01a1ea3b92, deleteOnExit=true 2024-11-16T11:38:35,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T11:38:35,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/test.cache.data in system properties and HBase conf 2024-11-16T11:38:35,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T11:38:35,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/hadoop.log.dir in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T11:38:35,875 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/nfs.dump.dir in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/java.io.tmpdir in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T11:38:35,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T11:38:35,889 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T11:38:36,244 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:38:36,248 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:38:36,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:38:36,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:38:36,252 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:38:36,256 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:38:36,257 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d39a950{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:38:36,257 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@285385d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:38:36,348 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3a3da410{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/java.io.tmpdir/jetty-localhost-33025-hadoop-hdfs-3_4_1-tests_jar-_-any-17481108478943565851/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T11:38:36,349 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69e6c21{HTTP/1.1, (http/1.1)}{localhost:33025} 2024-11-16T11:38:36,349 INFO [Time-limited test {}] server.Server(415): Started @301571ms 2024-11-16T11:38:36,359 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T11:38:36,610 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:38:36,613 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:38:36,613 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:38:36,613 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:38:36,613 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T11:38:36,614 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@677ce9c2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:38:36,614 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7af5dd93{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:38:36,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:38:36,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:36,711 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7a318fe4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/java.io.tmpdir/jetty-localhost-45233-hadoop-hdfs-3_4_1-tests_jar-_-any-7142233424017617750/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:38:36,711 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@36d9a593{HTTP/1.1, (http/1.1)}{localhost:45233} 2024-11-16T11:38:36,711 INFO [Time-limited test {}] server.Server(415): Started @301933ms 2024-11-16T11:38:36,712 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:38:36,735 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T11:38:36,738 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T11:38:36,738 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T11:38:36,738 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T11:38:36,738 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T11:38:36,739 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@552c31d5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/hadoop.log.dir/,AVAILABLE} 2024-11-16T11:38:36,739 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e16d0af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T11:38:36,832 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d4e388c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/java.io.tmpdir/jetty-localhost-39087-hadoop-hdfs-3_4_1-tests_jar-_-any-10135983973354708429/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T11:38:36,832 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ca64d19{HTTP/1.1, (http/1.1)}{localhost:39087} 2024-11-16T11:38:36,833 INFO [Time-limited test {}] server.Server(415): Started @302054ms 2024-11-16T11:38:36,833 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T11:38:37,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:37,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:38:38,082 WARN [Thread-2512 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/cluster_534d0645-9ce3-b1ed-6c2b-fe01a1ea3b92/data/data1/current/BP-1876677641-172.17.0.2-1731757115891/current, will proceed with Du for space computation calculation, 2024-11-16T11:38:38,083 WARN [Thread-2513 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/cluster_534d0645-9ce3-b1ed-6c2b-fe01a1ea3b92/data/data2/current/BP-1876677641-172.17.0.2-1731757115891/current, will proceed with Du for space computation calculation, 2024-11-16T11:38:38,103 WARN [Thread-2476 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T11:38:38,105 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf048e62279251fe8 with lease ID 0x5ccf0c921247e0f4: Processing first storage report for DS-856570fb-495a-4d49-849e-059e0b10989a from datanode DatanodeRegistration(127.0.0.1:45477, datanodeUuid=f2f8ac50-1501-46e7-a726-d0f6723dae44, infoPort=35293, infoSecurePort=0, ipcPort=44359, storageInfo=lv=-57;cid=testClusterID;nsid=1103565708;c=1731757115891) 2024-11-16T11:38:38,105 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf048e62279251fe8 with lease ID 0x5ccf0c921247e0f4: from storage DS-856570fb-495a-4d49-849e-059e0b10989a node DatanodeRegistration(127.0.0.1:45477, datanodeUuid=f2f8ac50-1501-46e7-a726-d0f6723dae44, infoPort=35293, infoSecurePort=0, ipcPort=44359, storageInfo=lv=-57;cid=testClusterID;nsid=1103565708;c=1731757115891), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:38:38,105 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf048e62279251fe8 with lease ID 0x5ccf0c921247e0f4: Processing first storage report for DS-17fc8d2c-6de3-4c17-959f-6ef034f8f5c6 from datanode DatanodeRegistration(127.0.0.1:45477, datanodeUuid=f2f8ac50-1501-46e7-a726-d0f6723dae44, infoPort=35293, infoSecurePort=0, ipcPort=44359, storageInfo=lv=-57;cid=testClusterID;nsid=1103565708;c=1731757115891) 2024-11-16T11:38:38,105 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf048e62279251fe8 with lease ID 0x5ccf0c921247e0f4: from storage DS-17fc8d2c-6de3-4c17-959f-6ef034f8f5c6 node DatanodeRegistration(127.0.0.1:45477, datanodeUuid=f2f8ac50-1501-46e7-a726-d0f6723dae44, infoPort=35293, infoSecurePort=0, ipcPort=44359, storageInfo=lv=-57;cid=testClusterID;nsid=1103565708;c=1731757115891), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:38:38,193 WARN [Thread-2524 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/cluster_534d0645-9ce3-b1ed-6c2b-fe01a1ea3b92/data/data4/current/BP-1876677641-172.17.0.2-1731757115891/current, will proceed with Du for space computation calculation, 2024-11-16T11:38:38,193 WARN [Thread-2523 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/cluster_534d0645-9ce3-b1ed-6c2b-fe01a1ea3b92/data/data3/current/BP-1876677641-172.17.0.2-1731757115891/current, will proceed with Du for space computation calculation, 2024-11-16T11:38:38,207 WARN [Thread-2499 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T11:38:38,209 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x90048c622a072715 with lease ID 0x5ccf0c921247e0f5: Processing first storage report for DS-bbdc727a-af4d-4758-bfd4-d620401b649d from datanode DatanodeRegistration(127.0.0.1:37559, datanodeUuid=a1050fcb-27f8-4965-b162-65cc29a62bb8, infoPort=38873, infoSecurePort=0, ipcPort=32769, storageInfo=lv=-57;cid=testClusterID;nsid=1103565708;c=1731757115891) 2024-11-16T11:38:38,209 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x90048c622a072715 with lease ID 0x5ccf0c921247e0f5: from storage DS-bbdc727a-af4d-4758-bfd4-d620401b649d node DatanodeRegistration(127.0.0.1:37559, datanodeUuid=a1050fcb-27f8-4965-b162-65cc29a62bb8, infoPort=38873, infoSecurePort=0, ipcPort=32769, storageInfo=lv=-57;cid=testClusterID;nsid=1103565708;c=1731757115891), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:38:38,209 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x90048c622a072715 with lease ID 0x5ccf0c921247e0f5: Processing first storage report for DS-cce45847-f47b-492d-9aba-87c9bc374ec0 from datanode DatanodeRegistration(127.0.0.1:37559, datanodeUuid=a1050fcb-27f8-4965-b162-65cc29a62bb8, infoPort=38873, infoSecurePort=0, ipcPort=32769, storageInfo=lv=-57;cid=testClusterID;nsid=1103565708;c=1731757115891) 2024-11-16T11:38:38,209 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x90048c622a072715 with lease ID 0x5ccf0c921247e0f5: from storage DS-cce45847-f47b-492d-9aba-87c9bc374ec0 node DatanodeRegistration(127.0.0.1:37559, datanodeUuid=a1050fcb-27f8-4965-b162-65cc29a62bb8, infoPort=38873, infoSecurePort=0, ipcPort=32769, storageInfo=lv=-57;cid=testClusterID;nsid=1103565708;c=1731757115891), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T11:38:38,266 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c 2024-11-16T11:38:38,268 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/cluster_534d0645-9ce3-b1ed-6c2b-fe01a1ea3b92/zookeeper_0, clientPort=55582, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/cluster_534d0645-9ce3-b1ed-6c2b-fe01a1ea3b92/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/cluster_534d0645-9ce3-b1ed-6c2b-fe01a1ea3b92/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T11:38:38,269 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55582 2024-11-16T11:38:38,269 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:38:38,270 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:38:38,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741825_1001 (size=7) 2024-11-16T11:38:38,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741825_1001 (size=7) 2024-11-16T11:38:38,278 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61 with version=8 2024-11-16T11:38:38,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39015/user/jenkins/test-data/70b46e9f-0936-83a0-3af1-358386b07062/hbase-staging 2024-11-16T11:38:38,281 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:38:38,281 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:38:38,281 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:38:38,281 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:38:38,281 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:38:38,281 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:38:38,281 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T11:38:38,281 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:38:38,282 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32973 2024-11-16T11:38:38,283 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32973 connecting to ZooKeeper ensemble=127.0.0.1:55582 2024-11-16T11:38:38,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:329730x0, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-16T11:38:38,342 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32973-0x10143708ee70000 connected 2024-11-16T11:38:38,424 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:38:38,426 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:38:38,428 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:38:38,429 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61, hbase.cluster.distributed=false 2024-11-16T11:38:38,431 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:38:38,432 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32973 2024-11-16T11:38:38,432 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32973 2024-11-16T11:38:38,433 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32973 2024-11-16T11:38:38,433 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32973 2024-11-16T11:38:38,434 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32973 2024-11-16T11:38:38,449 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a7948fca2832:0 server-side Connection retries=45 2024-11-16T11:38:38,450 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:38:38,450 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T11:38:38,450 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T11:38:38,450 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T11:38:38,450 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T11:38:38,450 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T11:38:38,450 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T11:38:38,450 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44647 2024-11-16T11:38:38,451 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44647 connecting to ZooKeeper ensemble=127.0.0.1:55582 2024-11-16T11:38:38,451 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:38:38,453 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:38:38,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446470x0, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T11:38:38,466 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44647-0x10143708ee70001 connected 2024-11-16T11:38:38,466 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:38:38,467 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T11:38:38,467 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T11:38:38,468 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T11:38:38,469 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T11:38:38,469 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44647 2024-11-16T11:38:38,469 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44647 2024-11-16T11:38:38,469 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44647 2024-11-16T11:38:38,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44647 2024-11-16T11:38:38,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44647 2024-11-16T11:38:38,483 DEBUG [M:0;a7948fca2832:32973 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a7948fca2832:32973 2024-11-16T11:38:38,483 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a7948fca2832,32973,1731757118280 2024-11-16T11:38:38,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:38:38,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:38:38,495 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a7948fca2832,32973,1731757118280 2024-11-16T11:38:38,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T11:38:38,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:38,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:38,509 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T11:38:38,510 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a7948fca2832,32973,1731757118280 from backup master directory 2024-11-16T11:38:38,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a7948fca2832,32973,1731757118280 2024-11-16T11:38:38,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:38:38,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T11:38:38,519 WARN [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T11:38:38,519 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a7948fca2832,32973,1731757118280 2024-11-16T11:38:38,525 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/hbase.id] with ID: 00bdfb38-bd65-4f6a-bf52-b1836ac01501 2024-11-16T11:38:38,525 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/.tmp/hbase.id 2024-11-16T11:38:38,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741826_1002 (size=42) 2024-11-16T11:38:38,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741826_1002 (size=42) 2024-11-16T11:38:38,534 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/.tmp/hbase.id]:[hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/hbase.id] 2024-11-16T11:38:38,544 INFO [master/a7948fca2832:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:38:38,545 INFO [master/a7948fca2832:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T11:38:38,546 INFO [master/a7948fca2832:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-16T11:38:38,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:38,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:38,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741827_1003 (size=196) 2024-11-16T11:38:38,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741827_1003 (size=196) 2024-11-16T11:38:38,565 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T11:38:38,565 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T11:38:38,566 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:38:38,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741828_1004 (size=1189) 2024-11-16T11:38:38,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741828_1004 (size=1189) 2024-11-16T11:38:38,573 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store 2024-11-16T11:38:38,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:38:38,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741829_1005 (size=34) 2024-11-16T11:38:38,579 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:38:38,579 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T11:38:38,579 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:38:38,579 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:38:38,579 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T11:38:38,579 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T11:38:38,579 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T11:38:38,579 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731757118579Disabling compacts and flushes for region at 1731757118579Disabling writes for close at 1731757118579Writing region close event to WAL at 1731757118579Closed at 1731757118579 2024-11-16T11:38:38,580 WARN [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/.initializing 2024-11-16T11:38:38,580 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/WALs/a7948fca2832,32973,1731757118280 2024-11-16T11:38:38,581 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C32973%2C1731757118280, suffix=, logDir=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/WALs/a7948fca2832,32973,1731757118280, archiveDir=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/oldWALs, maxLogs=10 2024-11-16T11:38:38,582 INFO [master/a7948fca2832:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C32973%2C1731757118280.1731757118581 2024-11-16T11:38:38,586 INFO [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/WALs/a7948fca2832,32973,1731757118280/a7948fca2832%2C32973%2C1731757118280.1731757118581 2024-11-16T11:38:38,587 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38873:38873),(127.0.0.1/127.0.0.1:35293:35293)] 2024-11-16T11:38:38,587 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:38:38,588 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:38:38,588 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:38:38,588 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:38:38,590 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:38:38,591 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T11:38:38,592 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:38,592 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:38:38,592 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:38:38,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T11:38:38,593 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:38,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:38:38,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:38:38,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T11:38:38,594 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:38,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:38:38,595 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:38:38,595 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T11:38:38,595 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:38,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T11:38:38,596 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:38:38,597 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:38:38,597 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:38:38,598 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:38:38,598 DEBUG [master/a7948fca2832:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:38:38,598 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T11:38:38,599 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T11:38:38,601 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:38:38,601 INFO [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=791228, jitterRate=0.006099626421928406}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T11:38:38,602 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731757118588Initializing all the Stores at 1731757118588Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757118589 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757118590 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757118590Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757118590Cleaning up temporary data from old regions at 1731757118598 (+8 ms)Region opened successfully at 1731757118602 (+4 ms) 2024-11-16T11:38:38,602 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T11:38:38,604 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f3fbdea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:38:38,605 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T11:38:38,605 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T11:38:38,605 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T11:38:38,605 INFO [master/a7948fca2832:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T11:38:38,605 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T11:38:38,606 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T11:38:38,606 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T11:38:38,607 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T11:38:38,608 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T11:38:38,613 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T11:38:38,613 INFO [master/a7948fca2832:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T11:38:38,614 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T11:38:38,624 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T11:38:38,624 INFO [master/a7948fca2832:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T11:38:38,625 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T11:38:38,634 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T11:38:38,635 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T11:38:38,645 DEBUG 
[master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T11:38:38,647 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T11:38:38,655 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T11:38:38,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T11:38:38,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:38,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T11:38:38,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:38,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:38,667 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a7948fca2832,32973,1731757118280, sessionid=0x10143708ee70000, setting cluster-up flag (Was=false) 2024-11-16T11:38:38,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T11:38:38,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:38,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:38,718 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T11:38:38,719 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7948fca2832,32973,1731757118280 2024-11-16T11:38:38,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:38,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:38,771 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T11:38:38,775 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a7948fca2832,32973,1731757118280 2024-11-16T11:38:38,777 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T11:38:38,780 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T11:38:38,781 INFO [master/a7948fca2832:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T11:38:38,781 INFO [master/a7948fca2832:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
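For readers tuning the balancer: the StochasticLoadBalancer line just above reports maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800 and maxRunningTime=30000. Below is a minimal Java sketch of how such values are normally supplied; the hbase.master.balancer.stochastic.* key names are assumed from the logged field names and common HBase documentation, not read back from this 3.0.0-beta-2-SNAPSHOT run.

// Illustrative only: assumed StochasticLoadBalancer tuning keys behind the values logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);      // maxSteps=1000000 in the log
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);      // stepsPerRegion=800
    conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);   // maxRunningTime=30000 (ms)
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);   // runMaxSteps=false
    return conf;
  }
}

The "sum of multiplier of cost functions = 0.0" in the same entry simply reflects that no cost-function multipliers have been overridden in this test configuration.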
2024-11-16T11:38:38,782 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a7948fca2832,32973,1731757118280 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T11:38:38,784 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:38:38,784 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:38:38,784 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:38:38,784 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a7948fca2832:0, corePoolSize=5, maxPoolSize=5 2024-11-16T11:38:38,784 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a7948fca2832:0, corePoolSize=10, maxPoolSize=10 2024-11-16T11:38:38,784 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:38:38,784 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:38:38,784 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:38:38,785 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731757148785 2024-11-16T11:38:38,785 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T11:38:38,785 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T11:38:38,785 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T11:38:38,785 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T11:38:38,785 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T11:38:38,785 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T11:38:38,785 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:38,785 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:38:38,785 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T11:38:38,786 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:38,786 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T11:38:38,787 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T11:38:38,787 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T11:38:38,787 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T11:38:38,788 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T11:38:38,788 INFO [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T11:38:38,788 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731757118788,5,FailOnTimeoutGroup] 2024-11-16T11:38:38,788 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731757118788,5,FailOnTimeoutGroup] 2024-11-16T11:38:38,788 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:38,788 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T11:38:38,788 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:38,788 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:38,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:38:38,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741831_1007 (size=1321) 2024-11-16T11:38:38,793 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T11:38:38,793 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61 2024-11-16T11:38:38,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741832_1008 (size=32) 2024-11-16T11:38:38,798 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741832_1008 (size=32) 2024-11-16T11:38:38,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:38:38,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T11:38:38,801 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T11:38:38,801 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:38,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:38:38,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T11:38:38,803 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T11:38:38,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:38,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:38:38,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T11:38:38,805 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T11:38:38,805 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:38,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:38:38,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T11:38:38,806 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T11:38:38,806 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:38,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:38:38,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T11:38:38,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/data/hbase/meta/1588230740 2024-11-16T11:38:38,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/data/hbase/meta/1588230740 2024-11-16T11:38:38,809 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T11:38:38,809 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T11:38:38,810 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T11:38:38,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T11:38:38,812 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T11:38:38,813 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804057, jitterRate=0.022411569952964783}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T11:38:38,814 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731757118799Initializing all the Stores at 1731757118800 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757118800Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757118800Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757118800Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757118800Cleaning up temporary data from old regions at 1731757118809 (+9 ms)Region opened successfully at 1731757118813 (+4 ms) 2024-11-16T11:38:38,814 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T11:38:38,814 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T11:38:38,814 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T11:38:38,814 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T11:38:38,814 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T11:38:38,814 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T11:38:38,814 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731757118814Disabling compacts and flushes for region at 1731757118814Disabling writes for close at 1731757118814Writing region close event to WAL at 1731757118814Closed at 1731757118814 2024-11-16T11:38:38,815 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:38:38,815 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T11:38:38,816 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T11:38:38,817 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T11:38:38,818 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T11:38:38,873 INFO [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(746): ClusterId : 00bdfb38-bd65-4f6a-bf52-b1836ac01501 2024-11-16T11:38:38,873 DEBUG [RS:0;a7948fca2832:44647 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T11:38:38,886 DEBUG [RS:0;a7948fca2832:44647 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T11:38:38,886 DEBUG [RS:0;a7948fca2832:44647 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T11:38:38,898 DEBUG [RS:0;a7948fca2832:44647 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T11:38:38,898 DEBUG [RS:0;a7948fca2832:44647 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2482d8e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a7948fca2832/172.17.0.2:0 2024-11-16T11:38:38,908 DEBUG [RS:0;a7948fca2832:44647 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a7948fca2832:44647 2024-11-16T11:38:38,909 INFO [RS:0;a7948fca2832:44647 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T11:38:38,909 INFO [RS:0;a7948fca2832:44647 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T11:38:38,909 DEBUG [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(832): About to register with Master. 
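The ZKWatcher/ZKUtil entries throughout this section repeatedly show the test cluster's ZooKeeper ensemble (quorum=127.0.0.1:55582) and base znode (/hbase). As a hedged illustration of how an external client would reach this master and regionserver, a minimal sketch follows; splitting the quorum string into hbase.zookeeper.quorum plus hbase.zookeeper.property.clientPort is an assumption about typical client setup, and the connection itself is hypothetical rather than part of the captured run.

// Illustrative client bootstrap against the mini-cluster seen in this log (not part of the captured output).
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // quorum host from the log
    conf.setInt("hbase.zookeeper.property.clientPort", 55582); // client port from quorum=127.0.0.1:55582
    conf.set("zookeeper.znode.parent", "/hbase");              // baseZNode=/hbase
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      // The client locates hbase:meta via the /hbase/meta-region-server znode the master updates later in this log.
      System.out.println("Connected; meta table = " + meta.getName());
    }
  }
}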
2024-11-16T11:38:38,909 INFO [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(2659): reportForDuty to master=a7948fca2832,32973,1731757118280 with port=44647, startcode=1731757118449 2024-11-16T11:38:38,910 DEBUG [RS:0;a7948fca2832:44647 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T11:38:38,911 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39297, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T11:38:38,912 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32973 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a7948fca2832,44647,1731757118449 2024-11-16T11:38:38,912 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32973 {}] master.ServerManager(517): Registering regionserver=a7948fca2832,44647,1731757118449 2024-11-16T11:38:38,913 DEBUG [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61 2024-11-16T11:38:38,913 DEBUG [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44661 2024-11-16T11:38:38,913 DEBUG [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T11:38:38,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:38:38,919 DEBUG [RS:0;a7948fca2832:44647 {}] zookeeper.ZKUtil(111): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a7948fca2832,44647,1731757118449 2024-11-16T11:38:38,919 WARN [RS:0;a7948fca2832:44647 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T11:38:38,919 INFO [RS:0;a7948fca2832:44647 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:38:38,919 DEBUG [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/WALs/a7948fca2832,44647,1731757118449 2024-11-16T11:38:38,919 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a7948fca2832,44647,1731757118449] 2024-11-16T11:38:38,923 INFO [RS:0;a7948fca2832:44647 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T11:38:38,924 INFO [RS:0;a7948fca2832:44647 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T11:38:38,924 INFO [RS:0;a7948fca2832:44647 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T11:38:38,924 INFO [RS:0;a7948fca2832:44647 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
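The MemStoreFlusher and PressureAwareCompactionThroughputController entries above report derived values: an 880 M global memstore limit with an 836 M low-water mark (0.95 of the limit) and compaction throughput bounded between 50 MB/s and 100 MB/s. A hedged sketch of the configuration keys that usually produce those numbers follows; the key names are assumptions based on standard HBase defaults rather than anything read from this test's hbase-site.xml.

// Illustrative only: assumed memstore and compaction-throughput keys behind the values reported above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushAndThroughputSketch {
  public static Configuration sketch() {
    Configuration conf = HBaseConfiguration.create();
    // Global memstore limit as a fraction of heap; the low-water mark is a fraction of that limit (836 M / 880 M = 0.95).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Compaction throughput bounds in bytes per second (log reports higher 100 MB/s, lower 50 MB/s).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}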
2024-11-16T11:38:38,925 INFO [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T11:38:38,925 INFO [RS:0;a7948fca2832:44647 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T11:38:38,925 INFO [RS:0;a7948fca2832:44647 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:38,925 DEBUG [RS:0;a7948fca2832:44647 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:38:38,926 DEBUG [RS:0;a7948fca2832:44647 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:38:38,926 DEBUG [RS:0;a7948fca2832:44647 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:38:38,926 DEBUG [RS:0;a7948fca2832:44647 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:38:38,926 DEBUG [RS:0;a7948fca2832:44647 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:38:38,926 DEBUG [RS:0;a7948fca2832:44647 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a7948fca2832:0, corePoolSize=2, maxPoolSize=2 2024-11-16T11:38:38,926 DEBUG [RS:0;a7948fca2832:44647 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:38:38,926 DEBUG [RS:0;a7948fca2832:44647 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:38:38,926 DEBUG [RS:0;a7948fca2832:44647 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:38:38,926 DEBUG [RS:0;a7948fca2832:44647 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:38:38,926 DEBUG [RS:0;a7948fca2832:44647 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:38:38,926 DEBUG [RS:0;a7948fca2832:44647 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a7948fca2832:0, corePoolSize=1, maxPoolSize=1 2024-11-16T11:38:38,926 DEBUG [RS:0;a7948fca2832:44647 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:38:38,926 DEBUG [RS:0;a7948fca2832:44647 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a7948fca2832:0, corePoolSize=3, maxPoolSize=3 2024-11-16T11:38:38,926 INFO [RS:0;a7948fca2832:44647 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
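The ChoreService entries above and below ("Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled.") come from periodic tasks such as CompactionChecker and MemstoreFlusherChore. A rough sketch of that pattern follows, assuming the ScheduledChore and ChoreService constructors keep their usual shapes; these are internal HBase classes, so treat every signature here as an unverified assumption.

// Illustrative only: a minimal custom chore, assuming the ScheduledChore/ChoreService API behind the entries above.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Same pattern as CompactionChecker / MemstoreFlusherChore: a named task run on a fixed period in milliseconds.
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("periodic work, analogous to the CompactionChecker above");
      }
    };
    ChoreService service = new ChoreService("example");
    service.scheduleChore(chore);  // scheduling is what produces the "is enabled." lines in this log
    // ... later: service.shutdown();
  }
}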
2024-11-16T11:38:38,926 INFO [RS:0;a7948fca2832:44647 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:38,927 INFO [RS:0;a7948fca2832:44647 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:38,927 INFO [RS:0;a7948fca2832:44647 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:38,927 INFO [RS:0;a7948fca2832:44647 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:38,927 INFO [RS:0;a7948fca2832:44647 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,44647,1731757118449-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T11:38:38,944 INFO [RS:0;a7948fca2832:44647 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T11:38:38,944 INFO [RS:0;a7948fca2832:44647 {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,44647,1731757118449-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:38,944 INFO [RS:0;a7948fca2832:44647 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:38,944 INFO [RS:0;a7948fca2832:44647 {}] regionserver.Replication(171): a7948fca2832,44647,1731757118449 started 2024-11-16T11:38:38,955 INFO [RS:0;a7948fca2832:44647 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:38,955 INFO [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(1482): Serving as a7948fca2832,44647,1731757118449, RpcServer on a7948fca2832/172.17.0.2:44647, sessionid=0x10143708ee70001 2024-11-16T11:38:38,955 DEBUG [RS:0;a7948fca2832:44647 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T11:38:38,955 DEBUG [RS:0;a7948fca2832:44647 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a7948fca2832,44647,1731757118449 2024-11-16T11:38:38,955 DEBUG [RS:0;a7948fca2832:44647 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,44647,1731757118449' 2024-11-16T11:38:38,955 DEBUG [RS:0;a7948fca2832:44647 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T11:38:38,956 DEBUG [RS:0;a7948fca2832:44647 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T11:38:38,956 DEBUG [RS:0;a7948fca2832:44647 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T11:38:38,956 DEBUG [RS:0;a7948fca2832:44647 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T11:38:38,956 DEBUG [RS:0;a7948fca2832:44647 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a7948fca2832,44647,1731757118449 2024-11-16T11:38:38,956 DEBUG [RS:0;a7948fca2832:44647 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a7948fca2832,44647,1731757118449' 2024-11-16T11:38:38,956 DEBUG [RS:0;a7948fca2832:44647 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T11:38:38,957 DEBUG 
[RS:0;a7948fca2832:44647 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T11:38:38,957 DEBUG [RS:0;a7948fca2832:44647 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T11:38:38,957 INFO [RS:0;a7948fca2832:44647 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T11:38:38,957 INFO [RS:0;a7948fca2832:44647 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T11:38:38,968 WARN [a7948fca2832:32973 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T11:38:39,061 INFO [RS:0;a7948fca2832:44647 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C44647%2C1731757118449, suffix=, logDir=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/WALs/a7948fca2832,44647,1731757118449, archiveDir=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/oldWALs, maxLogs=32 2024-11-16T11:38:39,062 INFO [RS:0;a7948fca2832:44647 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C44647%2C1731757118449.1731757119061 2024-11-16T11:38:39,071 INFO [RS:0;a7948fca2832:44647 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/WALs/a7948fca2832,44647,1731757118449/a7948fca2832%2C44647%2C1731757118449.1731757119061 2024-11-16T11:38:39,073 DEBUG [RS:0;a7948fca2832:44647 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35293:35293),(127.0.0.1/127.0.0.1:38873:38873)] 2024-11-16T11:38:39,218 DEBUG [a7948fca2832:32973 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T11:38:39,219 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a7948fca2832,44647,1731757118449 2024-11-16T11:38:39,223 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7948fca2832,44647,1731757118449, state=OPENING 2024-11-16T11:38:39,277 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T11:38:39,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:39,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:39,288 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T11:38:39,289 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a7948fca2832,44647,1731757118449}] 2024-11-16T11:38:39,289 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-16T11:38:39,289 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:38:39,443 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T11:38:39,447 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47947, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T11:38:39,454 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T11:38:39,454 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:38:39,456 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a7948fca2832%2C44647%2C1731757118449.meta, suffix=.meta, logDir=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/WALs/a7948fca2832,44647,1731757118449, archiveDir=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/oldWALs, maxLogs=32 2024-11-16T11:38:39,457 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a7948fca2832%2C44647%2C1731757118449.meta.1731757119457.meta 2024-11-16T11:38:39,466 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/WALs/a7948fca2832,44647,1731757118449/a7948fca2832%2C44647%2C1731757118449.meta.1731757119457.meta 2024-11-16T11:38:39,471 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38873:38873),(127.0.0.1/127.0.0.1:35293:35293)] 2024-11-16T11:38:39,472 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T11:38:39,473 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T11:38:39,473 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T11:38:39,473 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
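Both the regionserver WAL and the meta WAL in the surrounding entries are created by FSHLogProvider with blocksize=256 MB, rollsize=128 MB and maxLogs=32. Below is a hedged sketch of the settings that normally yield those numbers (rollsize being blocksize times the roll multiplier); the key names are standard WAL knobs but have not been checked against this snapshot build.

// Illustrative only: assumed WAL-related keys behind the "WAL configuration" lines above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static Configuration sketch() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem");                          // FSHLogProvider, as instantiated in the log
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = 256 MB * 0.5 = 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32
    return conf;
  }
}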
2024-11-16T11:38:39,473 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T11:38:39,473 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T11:38:39,473 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T11:38:39,473 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T11:38:39,474 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T11:38:39,475 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T11:38:39,475 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:39,475 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:38:39,475 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T11:38:39,476 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T11:38:39,476 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:39,476 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:38:39,476 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T11:38:39,476 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T11:38:39,477 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:39,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T11:38:39,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T11:38:39,477 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T11:38:39,477 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T11:38:39,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-16T11:38:39,478 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T11:38:39,478 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/data/hbase/meta/1588230740 2024-11-16T11:38:39,479 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/data/hbase/meta/1588230740 2024-11-16T11:38:39,480 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T11:38:39,481 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T11:38:39,481 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T11:38:39,482 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T11:38:39,483 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=859670, jitterRate=0.09312711656093597}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T11:38:39,483 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T11:38:39,483 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731757119473Writing region info on filesystem at 1731757119473Initializing all the Stores at 1731757119474 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757119474Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757119474Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731757119474Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731757119474Cleaning up temporary data from old regions at 1731757119481 (+7 ms)Running coprocessor post-open hooks at 1731757119483 (+2 ms)Region opened successfully at 1731757119483 2024-11-16T11:38:39,484 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731757119442 2024-11-16T11:38:39,486 DEBUG [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T11:38:39,486 INFO [RS_OPEN_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T11:38:39,487 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a7948fca2832,44647,1731757118449 2024-11-16T11:38:39,488 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a7948fca2832,44647,1731757118449, state=OPEN 2024-11-16T11:38:39,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T11:38:39,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T11:38:39,530 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a7948fca2832,44647,1731757118449 2024-11-16T11:38:39,530 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:38:39,530 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T11:38:39,535 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T11:38:39,536 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a7948fca2832,44647,1731757118449 in 241 msec 2024-11-16T11:38:39,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T11:38:39,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 721 msec 2024-11-16T11:38:39,541 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T11:38:39,541 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T11:38:39,557 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T11:38:39,557 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7948fca2832,44647,1731757118449, seqNum=-1] 2024-11-16T11:38:39,557 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T11:38:39,558 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59777, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T11:38:39,563 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 784 msec 2024-11-16T11:38:39,563 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731757119563, completionTime=-1 2024-11-16T11:38:39,563 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T11:38:39,563 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T11:38:39,565 INFO [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T11:38:39,565 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731757179565 2024-11-16T11:38:39,566 INFO [master/a7948fca2832:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731757239566 2024-11-16T11:38:39,566 INFO [master/a7948fca2832:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T11:38:39,566 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,32973,1731757118280-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:39,566 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,32973,1731757118280-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:39,566 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,32973,1731757118280-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:39,566 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a7948fca2832:32973, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T11:38:39,566 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:39,566 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T11:38:39,568 DEBUG [master/a7948fca2832:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T11:38:39,570 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.050sec 2024-11-16T11:38:39,570 INFO [master/a7948fca2832:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T11:38:39,570 INFO [master/a7948fca2832:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T11:38:39,570 INFO [master/a7948fca2832:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T11:38:39,570 INFO [master/a7948fca2832:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T11:38:39,570 INFO [master/a7948fca2832:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T11:38:39,570 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,32973,1731757118280-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T11:38:39,570 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,32973,1731757118280-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T11:38:39,572 DEBUG [master/a7948fca2832:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T11:38:39,572 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T11:38:39,572 INFO [master/a7948fca2832:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a7948fca2832,32973,1731757118280-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T11:38:39,572 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e9644ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:38:39,572 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a7948fca2832,32973,-1 for getting cluster id 2024-11-16T11:38:39,573 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T11:38:39,574 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '00bdfb38-bd65-4f6a-bf52-b1836ac01501' 2024-11-16T11:38:39,574 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T11:38:39,574 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "00bdfb38-bd65-4f6a-bf52-b1836ac01501" 2024-11-16T11:38:39,574 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f0d5326, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:38:39,574 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a7948fca2832,32973,-1] 2024-11-16T11:38:39,574 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T11:38:39,575 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:38:39,575 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41258, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T11:38:39,576 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@148032d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T11:38:39,576 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T11:38:39,577 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a7948fca2832,44647,1731757118449, seqNum=-1] 2024-11-16T11:38:39,577 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T11:38:39,578 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44628, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T11:38:39,579 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a7948fca2832,32973,1731757118280 2024-11-16T11:38:39,580 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T11:38:39,582 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T11:38:39,582 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T11:38:39,584 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/WALs/test.com,8080,1, archiveDir=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/oldWALs, maxLogs=32 2024-11-16T11:38:39,584 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731757119584 2024-11-16T11:38:39,589 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/WALs/test.com,8080,1/test.com%2C8080%2C1.1731757119584 2024-11-16T11:38:39,590 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38873:38873),(127.0.0.1/127.0.0.1:35293:35293)] 2024-11-16T11:38:39,591 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731757119590 2024-11-16T11:38:39,595 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,596 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,596 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,596 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,596 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,596 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/WALs/test.com,8080,1/test.com%2C8080%2C1.1731757119584 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/WALs/test.com,8080,1/test.com%2C8080%2C1.1731757119590 2024-11-16T11:38:39,597 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35293:35293),(127.0.0.1/127.0.0.1:38873:38873)] 2024-11-16T11:38:39,597 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/WALs/test.com,8080,1/test.com%2C8080%2C1.1731757119584 is not closed yet, will try archiving it next time 2024-11-16T11:38:39,598 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,598 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,598 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741835_1011 (size=93) 2024-11-16T11:38:39,598 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741835_1011 (size=93) 2024-11-16T11:38:39,598 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,599 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/WALs/test.com,8080,1/test.com%2C8080%2C1.1731757119584 to hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/oldWALs/test.com%2C8080%2C1.1731757119584 2024-11-16T11:38:39,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741836_1012 (size=93) 2024-11-16T11:38:39,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741836_1012 (size=93) 2024-11-16T11:38:39,602 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/oldWALs 2024-11-16T11:38:39,602 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731757119590) 2024-11-16T11:38:39,602 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T11:38:39,602 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T11:38:39,603 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:38:39,603 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:38:39,603 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:38:39,603 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T11:38:39,603 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T11:38:39,603 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1583668056, stopped=false 2024-11-16T11:38:39,603 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a7948fca2832,32973,1731757118280 2024-11-16T11:38:39,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T11:38:39,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T11:38:39,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:39,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:39,624 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T11:38:39,624 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T11:38:39,625 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:38:39,625 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:38:39,625 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:38:39,625 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T11:38:39,625 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a7948fca2832,44647,1731757118449' ***** 2024-11-16T11:38:39,625 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T11:38:39,625 INFO [RS:0;a7948fca2832:44647 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T11:38:39,625 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T11:38:39,626 INFO [RS:0;a7948fca2832:44647 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T11:38:39,626 INFO [RS:0;a7948fca2832:44647 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T11:38:39,626 INFO [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(959): stopping server a7948fca2832,44647,1731757118449 2024-11-16T11:38:39,626 INFO [RS:0;a7948fca2832:44647 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T11:38:39,626 INFO [RS:0;a7948fca2832:44647 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a7948fca2832:44647. 2024-11-16T11:38:39,626 DEBUG [RS:0;a7948fca2832:44647 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T11:38:39,626 DEBUG [RS:0;a7948fca2832:44647 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:38:39,626 INFO [RS:0;a7948fca2832:44647 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T11:38:39,626 INFO [RS:0;a7948fca2832:44647 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T11:38:39,626 INFO [RS:0;a7948fca2832:44647 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T11:38:39,626 INFO [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T11:38:39,627 INFO [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-16T11:38:39,627 DEBUG [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-16T11:38:39,627 DEBUG [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T11:38:39,627 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T11:38:39,627 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T11:38:39,627 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T11:38:39,627 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T11:38:39,627 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T11:38:39,627 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-16T11:38:39,645 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/data/hbase/meta/1588230740/.tmp/ns/7b408a836b4e43848418db4c434b1924 is 43, key is default/ns:d/1731757119559/Put/seqid=0 2024-11-16T11:38:39,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741837_1013 (size=5153) 2024-11-16T11:38:39,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741837_1013 (size=5153) 2024-11-16T11:38:39,649 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/data/hbase/meta/1588230740/.tmp/ns/7b408a836b4e43848418db4c434b1924 2024-11-16T11:38:39,653 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/data/hbase/meta/1588230740/.tmp/ns/7b408a836b4e43848418db4c434b1924 as hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/data/hbase/meta/1588230740/ns/7b408a836b4e43848418db4c434b1924 2024-11-16T11:38:39,657 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/data/hbase/meta/1588230740/ns/7b408a836b4e43848418db4c434b1924, entries=2, sequenceid=6, filesize=5.0 K 2024-11-16T11:38:39,658 INFO 
[RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false 2024-11-16T11:38:39,658 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T11:38:39,662 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T11:38:39,662 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T11:38:39,662 INFO [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T11:38:39,662 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731757119627Running coprocessor pre-close hooks at 1731757119627Disabling compacts and flushes for region at 1731757119627Disabling writes for close at 1731757119627Obtaining lock to block concurrent updates at 1731757119627Preparing flush snapshotting stores in 1588230740 at 1731757119627Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731757119628 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731757119629 (+1 ms)Flushing 1588230740/ns: creating writer at 1731757119629Flushing 1588230740/ns: appending metadata at 1731757119645 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731757119645Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e5e7078: reopening flushed file at 1731757119653 (+8 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1731757119658 (+5 ms)Writing region close event to WAL at 1731757119659 (+1 ms)Running coprocessor post-close hooks at 1731757119662 (+3 ms)Closed at 1731757119662 2024-11-16T11:38:39,663 DEBUG [RS_CLOSE_META-regionserver/a7948fca2832:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T11:38:39,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,46401,1731756926229/a7948fca2832%2C46401%2C1731756926229.1731756926462 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:39,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39669/user/jenkins/test-data/3e330cc3-c2bd-c2c2-4547-aac30dbe758b/WALs/a7948fca2832,40843,1731756924773/a7948fca2832%2C40843%2C1731756924773.meta.1731756925986.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T11:38:39,827 INFO [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(976): stopping server a7948fca2832,44647,1731757118449; all regions closed. 2024-11-16T11:38:39,828 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,829 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,829 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,829 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,830 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741834_1010 (size=1152) 2024-11-16T11:38:39,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741834_1010 (size=1152) 2024-11-16T11:38:39,841 DEBUG [RS:0;a7948fca2832:44647 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/oldWALs 2024-11-16T11:38:39,841 INFO [RS:0;a7948fca2832:44647 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C44647%2C1731757118449.meta:.meta(num 1731757119457) 2024-11-16T11:38:39,841 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,841 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,842 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,842 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,842 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T11:38:39,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741833_1009 (size=93) 2024-11-16T11:38:39,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741833_1009 (size=93) 2024-11-16T11:38:39,845 DEBUG [RS:0;a7948fca2832:44647 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/oldWALs 2024-11-16T11:38:39,845 INFO [RS:0;a7948fca2832:44647 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a7948fca2832%2C44647%2C1731757118449:(num 1731757119061) 2024-11-16T11:38:39,845 DEBUG [RS:0;a7948fca2832:44647 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T11:38:39,845 INFO [RS:0;a7948fca2832:44647 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T11:38:39,845 INFO [RS:0;a7948fca2832:44647 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T11:38:39,845 INFO [RS:0;a7948fca2832:44647 {}] hbase.ChoreService(370): Chore service for: regionserver/a7948fca2832:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, 
unit=MILLISECONDS] on shutdown 2024-11-16T11:38:39,846 INFO [RS:0;a7948fca2832:44647 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T11:38:39,846 INFO [regionserver/a7948fca2832:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T11:38:39,846 INFO [RS:0;a7948fca2832:44647 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44647 2024-11-16T11:38:39,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T11:38:39,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a7948fca2832,44647,1731757118449 2024-11-16T11:38:39,855 INFO [RS:0;a7948fca2832:44647 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T11:38:39,866 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a7948fca2832,44647,1731757118449] 2024-11-16T11:38:39,876 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a7948fca2832,44647,1731757118449 already deleted, retry=false 2024-11-16T11:38:39,876 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a7948fca2832,44647,1731757118449 expired; onlineServers=0 2024-11-16T11:38:39,876 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a7948fca2832,32973,1731757118280' ***** 2024-11-16T11:38:39,876 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T11:38:39,876 INFO [M:0;a7948fca2832:32973 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T11:38:39,876 INFO [M:0;a7948fca2832:32973 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T11:38:39,876 DEBUG [M:0;a7948fca2832:32973 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T11:38:39,877 DEBUG [M:0;a7948fca2832:32973 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T11:38:39,877 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T11:38:39,877 DEBUG [master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731757118788 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.large.0-1731757118788,5,FailOnTimeoutGroup] 2024-11-16T11:38:39,877 DEBUG [master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731757118788 {}] cleaner.HFileCleaner(306): Exit Thread[master/a7948fca2832:0:becomeActiveMaster-HFileCleaner.small.0-1731757118788,5,FailOnTimeoutGroup] 2024-11-16T11:38:39,877 INFO [M:0;a7948fca2832:32973 {}] hbase.ChoreService(370): Chore service for: master/a7948fca2832:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T11:38:39,877 INFO [M:0;a7948fca2832:32973 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T11:38:39,877 DEBUG [M:0;a7948fca2832:32973 {}] master.HMaster(1795): Stopping service threads 2024-11-16T11:38:39,877 INFO [M:0;a7948fca2832:32973 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T11:38:39,877 INFO [M:0;a7948fca2832:32973 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T11:38:39,877 INFO [M:0;a7948fca2832:32973 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T11:38:39,877 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T11:38:39,887 DEBUG [M:0;a7948fca2832:32973 {}] zookeeper.ZKUtil(347): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T11:38:39,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T11:38:39,887 WARN [M:0;a7948fca2832:32973 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T11:38:39,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T11:38:39,888 INFO [M:0;a7948fca2832:32973 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/.lastflushedseqids 2024-11-16T11:38:39,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741838_1014 (size=99) 2024-11-16T11:38:39,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741838_1014 (size=99) 2024-11-16T11:38:39,893 INFO [M:0;a7948fca2832:32973 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T11:38:39,894 INFO [M:0;a7948fca2832:32973 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T11:38:39,894 DEBUG [M:0;a7948fca2832:32973 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T11:38:39,894 INFO [M:0;a7948fca2832:32973 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-16T11:38:39,894 DEBUG [M:0;a7948fca2832:32973 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-16T11:38:39,894 DEBUG [M:0;a7948fca2832:32973 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-16T11:38:39,894 DEBUG [M:0;a7948fca2832:32973 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-16T11:38:39,894 INFO [M:0;a7948fca2832:32973 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB
2024-11-16T11:38:39,914 DEBUG [M:0;a7948fca2832:32973 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/08d1339661a24634a26b62bc23ce0fe4 is 82, key is hbase:meta,,1/info:regioninfo/1731757119487/Put/seqid=0
2024-11-16T11:38:39,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741839_1015 (size=5672)
2024-11-16T11:38:39,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741839_1015 (size=5672)
2024-11-16T11:38:39,918 INFO [M:0;a7948fca2832:32973 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/08d1339661a24634a26b62bc23ce0fe4
2024-11-16T11:38:39,935 DEBUG [M:0;a7948fca2832:32973 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4548eda6b86548ae834b3089fc262b1b is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731757119563/Put/seqid=0
2024-11-16T11:38:39,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741840_1016 (size=5275)
2024-11-16T11:38:39,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741840_1016 (size=5275)
2024-11-16T11:38:39,940 INFO [M:0;a7948fca2832:32973 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4548eda6b86548ae834b3089fc262b1b
2024-11-16T11:38:39,955 DEBUG [M:0;a7948fca2832:32973 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1b2bd06b98084a318148dc1ece0085d4 is 69, key is a7948fca2832,44647,1731757118449/rs:state/1731757118912/Put/seqid=0
2024-11-16T11:38:39,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741841_1017 (size=5156)
2024-11-16T11:38:39,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741841_1017 (size=5156)
2024-11-16T11:38:39,960 INFO [M:0;a7948fca2832:32973 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1b2bd06b98084a318148dc1ece0085d4
2024-11-16T11:38:39,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-16T11:38:39,966 INFO [RS:0;a7948fca2832:44647 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-16T11:38:39,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44647-0x10143708ee70001, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-16T11:38:39,966 INFO [RS:0;a7948fca2832:44647 {}] regionserver.HRegionServer(1031): Exiting; stopping=a7948fca2832,44647,1731757118449; zookeeper connection closed.
2024-11-16T11:38:39,966 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6a301469 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6a301469
2024-11-16T11:38:39,966 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-16T11:38:39,975 DEBUG [M:0;a7948fca2832:32973 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/afee89cd98eb409b9269d8e4d33eb588 is 52, key is load_balancer_on/state:d/1731757119581/Put/seqid=0
2024-11-16T11:38:39,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741842_1018 (size=5056)
2024-11-16T11:38:39,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741842_1018 (size=5056)
2024-11-16T11:38:39,980 INFO [M:0;a7948fca2832:32973 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/afee89cd98eb409b9269d8e4d33eb588
2024-11-16T11:38:39,983 DEBUG [M:0;a7948fca2832:32973 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/08d1339661a24634a26b62bc23ce0fe4 as hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/08d1339661a24634a26b62bc23ce0fe4
2024-11-16T11:38:39,987 INFO [M:0;a7948fca2832:32973 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/08d1339661a24634a26b62bc23ce0fe4, entries=8, sequenceid=29, filesize=5.5 K
2024-11-16T11:38:39,987 DEBUG [M:0;a7948fca2832:32973 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4548eda6b86548ae834b3089fc262b1b as hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4548eda6b86548ae834b3089fc262b1b
2024-11-16T11:38:39,991 INFO [M:0;a7948fca2832:32973 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4548eda6b86548ae834b3089fc262b1b, entries=3, sequenceid=29, filesize=5.2 K
2024-11-16T11:38:39,991 DEBUG [M:0;a7948fca2832:32973 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1b2bd06b98084a318148dc1ece0085d4 as hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1b2bd06b98084a318148dc1ece0085d4
2024-11-16T11:38:39,995 INFO [M:0;a7948fca2832:32973 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1b2bd06b98084a318148dc1ece0085d4, entries=1, sequenceid=29, filesize=5.0 K
2024-11-16T11:38:39,996 DEBUG [M:0;a7948fca2832:32973 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/afee89cd98eb409b9269d8e4d33eb588 as hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/afee89cd98eb409b9269d8e4d33eb588
2024-11-16T11:38:40,000 INFO [M:0;a7948fca2832:32973 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44661/user/jenkins/test-data/36080137-c31e-b55b-9979-486591ab6c61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/afee89cd98eb409b9269d8e4d33eb588, entries=1, sequenceid=29, filesize=4.9 K
2024-11-16T11:38:40,001 INFO [M:0;a7948fca2832:32973 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 106ms, sequenceid=29, compaction requested=false
2024-11-16T11:38:40,002 INFO [M:0;a7948fca2832:32973 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-16T11:38:40,002 DEBUG [M:0;a7948fca2832:32973 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
    Waiting for close lock at 1731757119894
    Disabling compacts and flushes for region at 1731757119894
    Disabling writes for close at 1731757119894
    Obtaining lock to block concurrent updates at 1731757119894
    Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731757119894
    Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731757119895 (+1 ms)
    Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731757119896 (+1 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731757119896
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731757119913 (+17 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731757119913
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731757119921 (+8 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731757119935 (+14 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731757119935
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731757119943 (+8 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731757119955 (+12 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731757119955
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731757119963 (+8 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731757119975 (+12 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731757119975
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@766fc152: reopening flushed file at 1731757119983 (+8 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@784edd7c: reopening flushed file at 1731757119987 (+4 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@104c898e: reopening flushed file at 1731757119991 (+4 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c906c3d: reopening flushed file at 1731757119995 (+4 ms)
    Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 106ms, sequenceid=29, compaction requested=false at 1731757120001 (+6 ms)
    Writing region close event to WAL at 1731757120002 (+1 ms)
    Closed at 1731757120002
2024-11-16T11:38:40,003 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:38:40,003 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:38:40,004 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:38:40,004 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:38:40,004 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T11:38:40,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45477 is added to blk_1073741830_1006 (size=10311)
2024-11-16T11:38:40,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37559 is added to blk_1073741830_1006 (size=10311)
2024-11-16T11:38:40,006 INFO [M:0;a7948fca2832:32973 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-16T11:38:40,006 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-16T11:38:40,006 INFO [M:0;a7948fca2832:32973 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32973
2024-11-16T11:38:40,006 INFO [M:0;a7948fca2832:32973 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-16T11:38:40,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-16T11:38:40,113 INFO [M:0;a7948fca2832:32973 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-16T11:38:40,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x10143708ee70000, quorum=127.0.0.1:55582, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-16T11:38:40,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d4e388c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-16T11:38:40,120 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ca64d19{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-16T11:38:40,120 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-16T11:38:40,121 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e16d0af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-16T11:38:40,121 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@552c31d5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/hadoop.log.dir/,STOPPED}
2024-11-16T11:38:40,123 WARN [BP-1876677641-172.17.0.2-1731757115891 heartbeating to localhost/127.0.0.1:44661 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-16T11:38:40,123 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-16T11:38:40,123 WARN [BP-1876677641-172.17.0.2-1731757115891 heartbeating to localhost/127.0.0.1:44661 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1876677641-172.17.0.2-1731757115891 (Datanode Uuid a1050fcb-27f8-4965-b162-65cc29a62bb8) service to localhost/127.0.0.1:44661
2024-11-16T11:38:40,123 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-16T11:38:40,124 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/cluster_534d0645-9ce3-b1ed-6c2b-fe01a1ea3b92/data/data3/current/BP-1876677641-172.17.0.2-1731757115891 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T11:38:40,124 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/cluster_534d0645-9ce3-b1ed-6c2b-fe01a1ea3b92/data/data4/current/BP-1876677641-172.17.0.2-1731757115891 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T11:38:40,124 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-16T11:38:40,126 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7a318fe4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-16T11:38:40,126 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@36d9a593{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-16T11:38:40,126 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-16T11:38:40,126 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7af5dd93{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-16T11:38:40,126 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@677ce9c2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/hadoop.log.dir/,STOPPED}
2024-11-16T11:38:40,127 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-16T11:38:40,127 WARN [BP-1876677641-172.17.0.2-1731757115891 heartbeating to localhost/127.0.0.1:44661 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-16T11:38:40,127 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-16T11:38:40,127 WARN [BP-1876677641-172.17.0.2-1731757115891 heartbeating to localhost/127.0.0.1:44661 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1876677641-172.17.0.2-1731757115891 (Datanode Uuid f2f8ac50-1501-46e7-a726-d0f6723dae44) service to localhost/127.0.0.1:44661
2024-11-16T11:38:40,128 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/cluster_534d0645-9ce3-b1ed-6c2b-fe01a1ea3b92/data/data1/current/BP-1876677641-172.17.0.2-1731757115891 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T11:38:40,128 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/cluster_534d0645-9ce3-b1ed-6c2b-fe01a1ea3b92/data/data2/current/BP-1876677641-172.17.0.2-1731757115891 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T11:38:40,128 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-16T11:38:40,132 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3a3da410{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-16T11:38:40,133 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69e6c21{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-16T11:38:40,133 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-16T11:38:40,133 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@285385d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-16T11:38:40,133 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d39a950{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1b9cd825-bab1-c0b4-a609-e0bad3e2006c/hadoop.log.dir/,STOPPED}
2024-11-16T11:38:40,139 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-16T11:38:40,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-16T11:38:40,161 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 231)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44661
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44661
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:44661 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:44661 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (785504411) connection to localhost/127.0.0.1:44661 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:44661
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:44661
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44661
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=532 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=168 (was 174), ProcessCount=11 (was 11), AvailableMemoryMB=4466 (was 4465) - AvailableMemoryMB LEAK? -